/* ufs.c */
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-02-25 GuEe-GUI the first version
  9. */
  10. #include <rthw.h>
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. #include <mm_page.h>
  14. #include <mm_aspace.h>
  15. #define DBG_TAG "rtdm.ufs"
  16. #define DBG_LVL DBG_INFO
  17. #include <rtdbg.h>
  18. /* UTP slot index used for all SCSI commands (single-threaded transfer) */
  19. #define RT_UFS_SLOT_ID 0
  20. /* Timeout for UTP transfer completion (ms) */
  21. #define RT_UFS_UTP_TIMEOUT_MS 5000
  22. /* UIC command timeout (ms) */
  23. #define RT_UFS_UIC_TIMEOUT_MS 500
  24. /* UTRL/UTMRL base address must be 1KB aligned (UTRLBA/UTMRLBA bits[31:10]). */
  25. #define RT_UFS_UTR_LIST_ALIGN 1024
  26. #define RT_UFS_UTR_LIST_BYTES(n) ((rt_size_t)(n) * sizeof(struct rt_utp_transfer_req_desc))
  27. /* UTRD dword_0 data direction bits */
  28. #define RT_UFS_UTRD_DD_NONE 0x00000000U
  29. #define RT_UFS_UTRD_DD_HOST_TO_DEVICE 0x02000000U
  30. #define RT_UFS_UTRD_DD_DEVICE_TO_HOST 0x04000000U
  31. /* UTRD dword_0 command type bits */
  32. #define RT_UFS_UTRD_CMD_TYPE_SCSI (1U << RT_UFS_UPIU_COMMAND_TYPE_OFFSET)
  33. static rt_uint8_t ufs_scsi_cmd_data_dir(struct rt_scsi_cmd *cmd)
  34. {
  35. switch (cmd->op.unknow.opcode)
  36. {
  37. case RT_SCSI_CMD_READ10:
  38. case RT_SCSI_CMD_READ12:
  39. case RT_SCSI_CMD_READ16:
  40. case RT_SCSI_CMD_READ_CAPACITY10:
  41. case RT_SCSI_CMD_READ_CAPACITY16:
  42. case RT_SCSI_CMD_INQUIRY:
  43. case RT_SCSI_CMD_REQUEST_SENSE:
  44. return RT_UTP_DEVICE_TO_HOST;
  45. case RT_SCSI_CMD_WRITE10:
  46. case RT_SCSI_CMD_WRITE12:
  47. case RT_SCSI_CMD_WRITE16:
  48. return RT_UTP_HOST_TO_DEVICE;
  49. default:
  50. return RT_UTP_NO_DATA_TRANSFER;
  51. }
  52. }
  53. static void ufs_prepare_upiu_cmd(struct rt_ufs_host *ufs, struct rt_utp_upiu_req *req, struct rt_utp_upiu_rsp *rsp,
  54. struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
  55. {
  56. rt_uint16_t cdb_len;
  57. rt_size_t data_len = cmd->data.size;
  58. rt_uint8_t flags = RT_UPIU_CMD_FLAGS_NONE;
  59. if (ufs_scsi_cmd_data_dir(cmd) == RT_UTP_DEVICE_TO_HOST)
  60. {
  61. flags = RT_UPIU_CMD_FLAGS_READ;
  62. }
  63. else if (ufs_scsi_cmd_data_dir(cmd) == RT_UTP_HOST_TO_DEVICE)
  64. {
  65. flags = RT_UPIU_CMD_FLAGS_WRITE;
  66. }
  67. rt_memset(req, 0, sizeof(*req));
  68. req->header.transaction_code = RT_UPIU_TRANSACTION_COMMAND;
  69. req->header.flags = flags;
  70. req->header.lun = (rt_uint8_t)(sdev->lun & 0x7f);
  71. req->header.task_tag = RT_UFS_SLOT_ID;
  72. req->header.command_set_type = RT_UPIU_COMMAND_SET_TYPE_SCSI;
  73. req->header.data_segment_length = rt_cpu_to_be16((rt_uint16_t)data_len);
  74. req->sc.exp_data_transfer_len = rt_cpu_to_be32((rt_uint32_t)data_len);
  75. cdb_len = cmd->op_size < RT_UFS_CDB_SIZE ? (rt_uint16_t)cmd->op_size : RT_UFS_CDB_SIZE;
  76. rt_memcpy(req->sc.cdb, &cmd->op, cdb_len);
  77. rt_memset(rsp, 0, sizeof(*rsp));
  78. }
  79. /* Fill UTRD header and link to UCD; PRDT filled separately. */
  80. static rt_err_t ufs_prepare_utrd(struct rt_ufs_host *ufs, rt_uint8_t data_dir, rt_size_t data_len)
  81. {
  82. rt_uint64_t ucd_phys = ufs->ucd_handle;
  83. rt_uint32_t utp_dd_dword = RT_UFS_UTRD_DD_NONE;
  84. rt_uint16_t resp_off_dw, resp_len_dw, prdt_off_dw, prdt_len_entries;
  85. struct rt_utp_transfer_req_desc *utrd = ufs->utrd;
  86. /* Clear whole descriptor to avoid any leftover fields */
  87. rt_memset(utrd, 0, sizeof(*utrd));
  88. /* UFSHCI: offset/length in DWORDs (4 bytes) */
  89. resp_off_dw = (rt_uint16_t)(RT_UFS_ALIGNED_UPIU_SIZE >> 2);
  90. prdt_off_dw = (rt_uint16_t)((RT_UFS_ALIGNED_UPIU_SIZE * 2) >> 2);
  91. resp_len_dw = (rt_uint16_t)(RT_UFS_ALIGNED_UPIU_SIZE >> 2);
  92. prdt_len_entries = (data_len > 0) ? 1 : 0;
  93. /* RequestDescHeader.dword_0..dword_3 */
  94. if (data_dir == RT_UTP_HOST_TO_DEVICE)
  95. {
  96. utp_dd_dword = RT_UFS_UTRD_DD_HOST_TO_DEVICE;
  97. }
  98. else if (data_dir == RT_UTP_DEVICE_TO_HOST)
  99. {
  100. utp_dd_dword = RT_UFS_UTRD_DD_DEVICE_TO_HOST;
  101. }
  102. /* Always set INT_CMD to ensure UTRCS gets asserted */
  103. utrd->header.dword_0 = rt_cpu_to_le32(RT_UFS_UTRD_CMD_TYPE_SCSI | utp_dd_dword | RT_UFS_UTP_REQ_DESC_INT_CMD);
  104. utrd->header.dword_1 = 0;
  105. utrd->header.dword_2 = rt_cpu_to_le32(RT_UFS_OCS_INVALID_COMMAND_STATUS);
  106. utrd->header.dword_3 = 0;
  107. /* UFSHCI expects LE for descriptor fields */
  108. utrd->command_desc_base_addr_lo = rt_cpu_to_le32(rt_lower_32_bits(ucd_phys));
  109. utrd->command_desc_base_addr_hi = rt_cpu_to_le32(rt_upper_32_bits(ucd_phys));
  110. utrd->response_upiu_offset = rt_cpu_to_le16(resp_off_dw);
  111. utrd->response_upiu_length = rt_cpu_to_le16(resp_len_dw);
  112. utrd->prd_table_offset = rt_cpu_to_le16(prdt_off_dw);
  113. utrd->prd_table_length = rt_cpu_to_le16(prdt_len_entries);
  114. return RT_EOK;
  115. }
  116. /* Fill PRDT with entries for cmd->data.ptr (split by page boundary). */
  117. static rt_err_t ufs_prepare_prdt(struct rt_ufs_host *ufs, const void *data_ptr, rt_size_t len, rt_uint16_t *out_entries)
  118. {
  119. rt_size_t left;
  120. rt_uint8_t *ptr;
  121. rt_uint16_t n = 0;
  122. struct rt_utp_transfer_cmd_desc *ucd = (struct rt_utp_transfer_cmd_desc *)ufs->ucd_base;
  123. struct rt_ufs_sg_entry *prd = ucd->prd_table;
  124. if (out_entries)
  125. {
  126. *out_entries = 0;
  127. }
  128. if (len == 0)
  129. {
  130. return RT_EOK;
  131. }
  132. left = len;
  133. ptr = (rt_uint8_t *)(rt_ubase_t)data_ptr;
  134. while (left)
  135. {
  136. rt_uint64_t addr;
  137. rt_size_t chunk, page_off;
  138. if (n >= RT_UFS_PRDT_ENTRIES_PER_SLOT)
  139. {
  140. LOG_E("%s: UFS: PRDT entries overflow (len=%u left=%u n=%u)", rt_dm_dev_get_name(ufs->parent.dev),
  141. len, left, n);
  142. return -RT_EIO;
  143. }
  144. page_off = ((rt_ubase_t)ptr) & (ARCH_PAGE_SIZE - 1);
  145. chunk = ARCH_PAGE_SIZE - page_off;
  146. if (chunk > left)
  147. {
  148. chunk = left;
  149. }
  150. if (ptr >= ufs->bounce && ptr < (ufs->bounce + ufs->bounce_size))
  151. {
  152. addr = (rt_uint64_t)ufs->bounce_handle + (rt_uint64_t)(ptr - ufs->bounce);
  153. }
  154. else
  155. {
  156. addr = (rt_uint64_t)rt_kmem_v2p(ptr);
  157. }
  158. prd[n].addr = rt_cpu_to_le64(addr);
  159. prd[n].reserved = 0;
  160. prd[n].size = rt_cpu_to_le32((rt_uint32_t)(chunk - 1)); /* 0-based byte count */
  161. ptr += chunk;
  162. left -= chunk;
  163. ++n;
  164. }
  165. if (out_entries)
  166. {
  167. *out_entries = n;
  168. }
  169. return RT_EOK;
  170. }
  171. static rt_err_t ufs_utp_transfer(struct rt_ufs_host *ufs, struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
  172. {
  173. struct rt_utp_transfer_cmd_desc *ucd = (struct rt_utp_transfer_cmd_desc *)ufs->ucd_base;
  174. struct rt_utp_upiu_req *req = (struct rt_utp_upiu_req *)ucd->command_upiu;
  175. struct rt_utp_upiu_rsp *rsp = (struct rt_utp_upiu_rsp *)ucd->response_upiu;
  176. struct rt_utp_transfer_req_desc *utrd = ufs->utrd;
  177. void *regs = ufs->regs;
  178. rt_uint8_t data_dir;
  179. rt_uint32_t is;
  180. rt_err_t err = RT_EOK;
  181. rt_uint16_t prdt_entries = 0;
  182. rt_uint8_t ocs;
  183. rt_bool_t use_bounce = RT_FALSE;
  184. void *orig_ptr = cmd->data.ptr, *resp_buf;
  185. rt_size_t orig_size = cmd->data.size;
  186. void *dma_ptr = cmd->data.ptr;
  187. rt_size_t dma_size = cmd->data.size;
  188. rt_bool_t bounce_copied = RT_FALSE;
  189. data_dir = ufs_scsi_cmd_data_dir(cmd);
  190. /* Basic controller readiness checks to avoid hard timeouts */
  191. if (regs)
  192. {
  193. rt_uint32_t hcs = HWREG32(regs + RT_UFS_REG_HCS);
  194. rt_uint32_t hce = HWREG32(regs + RT_UFS_REG_HCE);
  195. if (!(hce & 0x1) || !(hcs & RT_UFS_REG_HCS_UTRLRDY) || !(hcs & RT_UFS_REG_HCS_UCRDY))
  196. {
  197. LOG_E("%s: UFS not ready for UTP: HCE=%#08x HCS=%#08x (UTRLRDY=%u UCRDY=%u)",
  198. rt_dm_dev_get_name(ufs->parent.dev),
  199. hce, hcs,
  200. (hcs & RT_UFS_REG_HCS_UTRLRDY) ? 1 : 0,
  201. (hcs & RT_UFS_REG_HCS_UCRDY) ? 1 : 0);
  202. return -RT_EIO;
  203. }
  204. }
  205. /*
  206. * Bounce small I/O buffers.
  207. * Many SCSI helper commands use stack-allocated buffers (embedded in rt_scsi_cmd),
  208. * which are unsafe for DMA in Smart/MMU environments. Use a pre-allocated
  209. * DMA-friendly bounce buffer for small transfers.
  210. */
  211. if (cmd->data.ptr && cmd->data.size && ufs->bounce && ufs->bounce_size && cmd->data.size <= ufs->bounce_size)
  212. {
  213. use_bounce = RT_TRUE;
  214. if (data_dir == RT_UTP_HOST_TO_DEVICE)
  215. {
  216. rt_memcpy(ufs->bounce, orig_ptr, orig_size);
  217. }
  218. dma_ptr = ufs->bounce;
  219. dma_size = orig_size;
  220. }
  221. ufs_prepare_upiu_cmd(ufs, req, rsp, sdev, cmd);
  222. if ((err = ufs_prepare_utrd(ufs, data_dir, cmd->data.size)))
  223. {
  224. goto _end;
  225. }
  226. if ((err = ufs_prepare_prdt(ufs, dma_ptr, dma_size, &prdt_entries)))
  227. {
  228. goto _end;
  229. }
  230. utrd->prd_table_length = rt_cpu_to_le16(prdt_entries);
  231. /* Basic debug information before submitting UTP */
  232. LOG_D("%s: UTP submit: opcode=%#02x dir=%u data_len=%u slot=%u",
  233. rt_dm_dev_get_name(ufs->parent.dev),
  234. cmd->op.unknow.opcode, data_dir,
  235. cmd->data.size, RT_UFS_SLOT_ID);
  236. LOG_D("%s: UTP ctrl regs: HCS=%#08x IE=%#08x HCE=%#08x",
  237. rt_dm_dev_get_name(ufs->parent.dev),
  238. HWREG32(regs + RT_UFS_REG_HCS),
  239. HWREG32(regs + RT_UFS_REG_IE),
  240. HWREG32(regs + RT_UFS_REG_HCE));
  241. LOG_D("%s: UTP ctrl ready bits: UTRLRDY=%u UTMRLRDY=%u UICRDY=%u",
  242. rt_dm_dev_get_name(ufs->parent.dev),
  243. (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UTRLRDY) ? 1 : 0,
  244. (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UTMRLRDY) ? 1 : 0,
  245. (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UCRDY) ? 1 : 0);
  246. LOG_D("%s: UTP UTRD: dword_0=%#08x dword_2(ocs)=%#08x",
  247. rt_dm_dev_get_name(ufs->parent.dev),
  248. rt_le32_to_cpu(utrd->header.dword_0),
  249. rt_le32_to_cpu(utrd->header.dword_2));
  250. LOG_D("%s: UTP UTRD: ucd_base_lo=%#08x ucd_base_hi=%#08x resp_off_dw=%u resp_len_dw=%u prd_off_dw=%u prd_len=%u",
  251. rt_dm_dev_get_name(ufs->parent.dev),
  252. rt_le32_to_cpu(utrd->command_desc_base_addr_lo),
  253. rt_le32_to_cpu(utrd->command_desc_base_addr_hi),
  254. rt_le16_to_cpu(utrd->response_upiu_offset),
  255. rt_le16_to_cpu(utrd->response_upiu_length),
  256. rt_le16_to_cpu(utrd->prd_table_offset),
  257. rt_le16_to_cpu(utrd->prd_table_length));
  258. /*
  259. * DMA descriptors/data must be visible to controller.
  260. * Clean dcache for UTRD + UCD (command/response/PRDT) before ringing doorbell.
  261. */
  262. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, ufs->utrd, sizeof(*utrd));
  263. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, ufs->ucd_base, ufs->ucd_size);
  264. if (dma_ptr && dma_size && data_dir == RT_UTP_HOST_TO_DEVICE)
  265. {
  266. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_ptr, dma_size);
  267. }
  268. /* Clear interrupt status for transfer complete */
  269. ufs->irq_status = 0;
  270. /* Ack any stale pending IRQ bits before kicking */
  271. HWREG32(regs + RT_UFS_REG_IS) = RT_UINT32_MAX;
  272. /* Enable UTP completion/error interrupts */
  273. HWREG32(regs + RT_UFS_REG_IE) = RT_UFS_REG_IE_UTRCE | RT_UFS_REG_IE_UTPEE | RT_UFS_REG_IE_DFEE | RT_UFS_REG_IE_UEE;
  274. /* Ring door bell for slot 0 */
  275. rt_spin_lock(&ufs->lock);
  276. HWREG32(regs + RT_UFS_REG_UTRLDBR) = RT_BIT(RT_UFS_SLOT_ID);
  277. rt_spin_unlock(&ufs->lock);
  278. /* Interrupt mode: wait for completion signaled by ISR */
  279. if ((err = rt_completion_wait(&ufs->done, rt_tick_from_millisecond(RT_UFS_UTP_TIMEOUT_MS))))
  280. {
  281. LOG_E("%s: UFS UTP wait timeout: IS=%#08x irq_status=%#08x",
  282. rt_dm_dev_get_name(ufs->parent.dev),
  283. HWREG32(regs + RT_UFS_REG_IS), ufs->irq_status);
  284. /* Dump UPIU header and PRDT entry for post-mortem */
  285. LOG_E("%s: UTP UPIU: tx=%u flags=%#02x lun=%u tag=%u seg_len(be16)=%u",
  286. rt_dm_dev_get_name(ufs->parent.dev),
  287. req->header.transaction_code, req->header.flags,
  288. req->header.lun, req->header.task_tag,
  289. rt_be16_to_cpu(req->header.data_segment_length));
  290. if (cmd->data.size > 0)
  291. {
  292. LOG_E("%s: UTP PRDT[0]: addr=%#llx size(le32)=%#08x",
  293. rt_dm_dev_get_name(ufs->parent.dev),
  294. (rt_uint64_t)(&((struct rt_utp_transfer_cmd_desc *)ufs->ucd_base)->prd_table[0])->addr,
  295. (&((struct rt_utp_transfer_cmd_desc *)ufs->ucd_base)->prd_table[0])->size);
  296. }
  297. goto _end;
  298. }
  299. is = ufs->irq_status | HWREG32(regs + RT_UFS_REG_IS);
  300. if (is & (RT_UFS_REG_IS_UTPES | RT_UFS_REG_IS_DFES | RT_UFS_REG_IS_UE))
  301. {
  302. err = -RT_ERROR;
  303. goto _end;
  304. }
  305. /* Ensure controller-written status/data are seen */
  306. /* UTRL is 1KB aligned; invalidate a full slot region to avoid cacheline/alias issues */
  307. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, ufs->utrl_base, RT_UFS_UTR_LIST_ALIGN);
  308. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, ufs->ucd_base, ufs->ucd_size);
  309. if (dma_ptr && dma_size && data_dir == RT_UTP_DEVICE_TO_HOST)
  310. {
  311. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, dma_ptr, dma_size);
  312. }
  313. /*
  314. * If bounce buffer is used for device->host, copy it back immediately after
  315. * invalidation so that any post-processing (e.g. READ CAPACITY emulation)
  316. * operates on the latest data. Otherwise, the late copy at out_restore would
  317. * overwrite any modifications.
  318. */
  319. if (use_bounce && data_dir == RT_UTP_DEVICE_TO_HOST && orig_ptr && orig_size)
  320. {
  321. rt_memcpy(orig_ptr, ufs->bounce, orig_size);
  322. bounce_copied = RT_TRUE;
  323. }
  324. LOG_D("%s: UFS UTRD complete: dword_2=%#08x (ocs=%#x)",
  325. rt_dm_dev_get_name(ufs->parent.dev),
  326. rt_le32_to_cpu(utrd->header.dword_2),
  327. (rt_le32_to_cpu(utrd->header.dword_2) & 0x0f));
  328. /*
  329. * Post-process SCSI response in resp_buf (INQUIRY etc.).
  330. * READ CAPACITY: pass through device-reported block_size / last_block.
  331. * Use resp_buf: after bounce_copied, cmd->data.ptr may alias response memory.
  332. */
  333. resp_buf = (bounce_copied && orig_ptr) ? orig_ptr : cmd->data.ptr;
  334. if (!resp_buf)
  335. {
  336. goto _no_resp_buf;
  337. }
  338. if (cmd->op.unknow.opcode == RT_SCSI_CMD_INQUIRY &&
  339. cmd->data.size >= sizeof(struct rt_scsi_inquiry_data))
  340. {
  341. struct rt_scsi_inquiry_data *inq = (struct rt_scsi_inquiry_data *)resp_buf;
  342. /* Align with AHCI behavior: always expose direct-access block device */
  343. inq->devtype = SCSI_DEVICE_TYPE_DIRECT;
  344. inq->rmb = 0;
  345. inq->length = 95 - 4;
  346. }
  347. _no_resp_buf:
  348. /* Log response header for troubleshooting */
  349. LOG_D("%s: UFS RSP: tc=%#02x rsp=%#02x sts=%#02x seg_len=%u sense_len=%u",
  350. rt_dm_dev_get_name(ufs->parent.dev),
  351. rsp->header.transaction_code,
  352. rsp->header.response,
  353. rsp->header.status,
  354. rt_be16_to_cpu(rsp->header.data_segment_length),
  355. rt_be16_to_cpu(rsp->sr.sense_data_len));
  356. ocs = rt_le32_to_cpu(utrd->header.dword_2) & 0xf;
  357. if (ocs != RT_UFS_OCS_SUCCESS)
  358. {
  359. LOG_E("%s: UFS OCS %#x", rt_dm_dev_get_name(ufs->parent.dev), ocs);
  360. err = -RT_ERROR;
  361. goto _end;
  362. }
  363. if (rsp->header.transaction_code != RT_UPIU_TRANSACTION_RESPONSE)
  364. {
  365. err = -RT_ERROR;
  366. goto _end;
  367. }
  368. if (rsp->header.status != 0)
  369. {
  370. err = -RT_ERROR;
  371. goto _end;
  372. }
  373. /* Sense data: copy to cmd when check condition (BSP may use cmd->data.ptr for sense) */
  374. if (err != RT_EOK && rsp->sr.sense_data_len != 0 && cmd->data.ptr && cmd->data.size >= RT_UFS_SENSE_SIZE)
  375. {
  376. rt_uint16_t sense_len = rt_be16_to_cpu(rsp->sr.sense_data_len);
  377. sense_len = rt_min_t(rt_uint16_t, sense_len, RT_UFS_SENSE_SIZE);
  378. rt_memcpy(cmd->data.ptr, rsp->sr.sense_data, sense_len);
  379. }
  380. _end:
  381. if (use_bounce && !bounce_copied && err == RT_EOK && data_dir == RT_UTP_DEVICE_TO_HOST && orig_ptr && orig_size)
  382. {
  383. rt_memcpy(orig_ptr, ufs->bounce, orig_size);
  384. }
  385. return err;
  386. }
  387. static rt_err_t ufs_host_reset(struct rt_scsi_device *sdev)
  388. {
  389. struct rt_ufs_host *ufs = rt_container_of(sdev->host, struct rt_ufs_host, parent);
  390. if (ufs->ops->reset)
  391. {
  392. return ufs->ops->reset(ufs);
  393. }
  394. return RT_EOK;
  395. }
  396. static rt_err_t ufs_host_transfer(struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
  397. {
  398. rt_err_t err;
  399. struct rt_ufs_host *ufs = rt_container_of(sdev->host, struct rt_ufs_host, parent);
  400. switch (cmd->op.unknow.opcode)
  401. {
  402. case RT_SCSI_CMD_REQUEST_SENSE:
  403. case RT_SCSI_CMD_READ10:
  404. case RT_SCSI_CMD_READ12:
  405. case RT_SCSI_CMD_READ16:
  406. case RT_SCSI_CMD_WRITE10:
  407. case RT_SCSI_CMD_WRITE12:
  408. case RT_SCSI_CMD_WRITE16:
  409. case RT_SCSI_CMD_SYNCHRONIZE_CACHE10:
  410. case RT_SCSI_CMD_SYNCHRONIZE_CACHE16:
  411. case RT_SCSI_CMD_READ_CAPACITY10:
  412. case RT_SCSI_CMD_READ_CAPACITY16:
  413. case RT_SCSI_CMD_TEST_UNIT_READY:
  414. case RT_SCSI_CMD_INQUIRY:
  415. err = ufs_utp_transfer(ufs, sdev, cmd);
  416. break;
  417. case RT_SCSI_CMD_WRITE_SAME10:
  418. case RT_SCSI_CMD_WRITE_SAME16:
  419. case RT_SCSI_CMD_MODE_SENSE:
  420. case RT_SCSI_CMD_MODE_SENSE10:
  421. case RT_SCSI_CMD_MODE_SELECT:
  422. case RT_SCSI_CMD_MODE_SELECT10:
  423. err = -RT_ENOSYS;
  424. break;
  425. default:
  426. err = -RT_EINVAL;
  427. break;
  428. }
  429. return err;
  430. }
/* SCSI host operation table handed to the SCSI core at registration */
static struct rt_scsi_ops ufs_host_ops =
{
    .reset = ufs_host_reset,
    .transfer = ufs_host_transfer,
};
  436. static void ufs_isr(int irqno, void *param)
  437. {
  438. rt_uint32_t is;
  439. struct rt_ufs_host *ufs = param;
  440. if (!(is = HWREG32(ufs->regs + RT_UFS_REG_IS)))
  441. {
  442. return;
  443. }
  444. ufs->irq_status |= is;
  445. HWREG32(ufs->regs + RT_UFS_REG_IS) = is;
  446. if (is & (RT_UFS_REG_IS_UTRCS | RT_UFS_REG_IS_UTPES | RT_UFS_REG_IS_DFES | RT_UFS_REG_IS_UE))
  447. {
  448. rt_completion_done(&ufs->done);
  449. }
  450. }
/*
 * Register a UFS host controller with the SCSI core.
 *
 * Sequence (order matters for hardware bring-up):
 *   1. read CAP and derive slot count (clamped to the 32-bit doorbell),
 *   2. allocate DMA-coherent UTRL / UCD / bounce buffers,
 *   3. run variant init (clocks/reset/phy),
 *   4. toggle HCE to enable the controller,
 *   5. program UTRL/UTMRL base addresses and start run/stop,
 *   6. perform DME link startup if the ready bits are not already set,
 *   7. install the IRQ and register with the SCSI core.
 *
 * @param ufs host descriptor; ops and regs must be populated by the caller.
 * @return RT_EOK on success; -RT_EINVAL/-RT_ENOMEM or a propagated error,
 *         with all allocations released on failure.
 */
rt_err_t rt_ufs_host_register(struct rt_ufs_host *ufs)
{
    rt_err_t err;
    rt_uint32_t value;
    char dev_name[RT_NAME_MAX];
    struct rt_scsi_host *scsi;

    if (!ufs || !ufs->ops || !ufs->regs)
    {
        return -RT_EINVAL;
    }

    /* CAP.NUTRS is 0-based; clamp to the doorbell register width (32 slots) */
    ufs->cap = HWREG32(ufs->regs + RT_UFS_REG_CAP);
    ufs->nutrs = RT_UFS_REG_CAP_NUTRS(ufs->cap) + 1;
    ufs->nutrs = rt_min_t(rt_uint32_t, 32, ufs->nutrs);

    /* Allocate UTRL (UTRD list) with required 1KB alignment */
    ufs->utrl_size = RT_UFS_UTR_LIST_BYTES(ufs->nutrs);
    ufs->utrl_size = rt_max_t(rt_size_t, ufs->utrl_size, RT_UFS_UTR_LIST_ALIGN);
    ufs->utrl_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->utrl_size, &ufs->utrl_handle);
    if (!ufs->utrl_base)
    {
        err = -RT_ENOMEM;
        goto _fail;
    }
    rt_memset(ufs->utrl_base, 0, ufs->utrl_size);
    ufs->utrd = (struct rt_utp_transfer_req_desc *)ufs->utrl_base; /* Slot0 */

    /* Allocate UCD (Command Descriptor); keep a caller-provided size if set */
    ufs->ucd_size = ufs->ucd_size ? : RT_UFS_UCD_SIZE;
    ufs->ucd_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->ucd_size, &ufs->ucd_handle);
    if (!ufs->ucd_base)
    {
        err = -RT_ENOMEM;
        goto _fail;
    }
    rt_memset(ufs->ucd_base, 0, ufs->ucd_size);

    /* Allocate a 4KB bounce buffer for small DMA transfers (covers inquiry/sense/capacity) */
    ufs->bounce_size = 4096;
    ufs->bounce_handle = 0;
    ufs->bounce = rt_dma_alloc_coherent(ufs->parent.dev, ufs->bounce_size, &ufs->bounce_handle);
    if (!ufs->bounce)
    {
        err = -RT_ENOMEM;
        goto _fail;
    }
    rt_memset(ufs->bounce, 0, ufs->bounce_size);

    /* Variant/board specific init (clocks/reset/phy) */
    if (ufs->ops->init && (err = ufs->ops->init(ufs)))
    {
        goto _fail;
    }

    /* Toggle HCE (disable then enable); 1ms settle delay between writes.
     * NOTE(review): no HCE readback poll here — assumes the controller is
     * enabled within 1ms; confirm against the target controller's datasheet. */
    HWREG32(ufs->regs + RT_UFS_REG_HCE) = 0;
    rt_thread_mdelay(1);
    HWREG32(ufs->regs + RT_UFS_REG_HCE) = 0x1;
    rt_thread_mdelay(1);

    /* Program UTRD list base address */
    HWREG32(ufs->regs + RT_UFS_REG_UTRLBA) = rt_lower_32_bits(ufs->utrl_handle);
    HWREG32(ufs->regs + RT_UFS_REG_UTRLBAU) = rt_upper_32_bits(ufs->utrl_handle);

    /* UTMRL: allocate minimal aligned list (allocation failure is tolerated;
     * UTMRLBA is then programmed with handle 0) */
    ufs->utmrl_size = RT_UFS_UTR_LIST_ALIGN;
    ufs->utmrl_handle = 0;
    ufs->utmrl_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->utmrl_size, &ufs->utmrl_handle);
    if (ufs->utmrl_base)
    {
        rt_memset(ufs->utmrl_base, 0, ufs->utmrl_size);
    }
    HWREG32(ufs->regs + RT_UFS_REG_UTMRLBA) = rt_lower_32_bits(ufs->utmrl_handle);
    HWREG32(ufs->regs + RT_UFS_REG_UTMRLBAU) = rt_upper_32_bits(ufs->utmrl_handle);

    /* Start run/stop for both lists */
    HWREG32(ufs->regs + RT_UFS_REG_UTRLRSR) = 0x1;
    HWREG32(ufs->regs + RT_UFS_REG_UTMRLRSR) = 0x1;

    /* Link startup only if the ready bits are not already set (warm boot) */
    value = HWREG32(ufs->regs + RT_UFS_REG_HCS);
    if (!(value & RT_UFS_REG_HCS_UTRLRDY) || !(value & RT_UFS_REG_HCS_UTMRLRDY) || !(value & RT_UFS_REG_HCS_UCRDY))
    {
        if (ufs->ops->link_startup_notify)
        {
            ufs->ops->link_startup_notify(ufs, RT_UFS_NOTIFY_CHANGE_STATUS_PRE);
        }
        if ((err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_LINKSTARTUP, 0, &value, 0)))
        {
            goto _fail;
        }
        if (ufs->ops->link_startup_notify)
        {
            ufs->ops->link_startup_notify(ufs, RT_UFS_NOTIFY_CHANGE_STATUS_POST);
        }
    }

    ufs->pwr_active_valid = 0;
    /* ahit==0 lets rt_ufs_pm_post_linkup apply RT_UFS_AHIT_DEFAULT when CAP_AUTOH8 */
    rt_ufs_pm_post_linkup(ufs);

    /* Synchronization primitives used by the transfer path / ISR */
    rt_completion_init(&ufs->done);
    rt_spin_lock_init(&ufs->lock);

    rt_snprintf(dev_name, sizeof(dev_name), "ufs-%s", rt_dm_dev_get_name(ufs->parent.dev));
    rt_hw_interrupt_install(ufs->irq, ufs_isr, ufs, dev_name);
    rt_hw_interrupt_umask(ufs->irq);

    scsi = &ufs->parent;
    scsi->ops = &ufs_host_ops;
    scsi->max_id = rt_max_t(rt_size_t, scsi->max_id, 1);
    scsi->max_lun = rt_max_t(rt_size_t, scsi->max_lun, 1);
    scsi->parallel_io = RT_TRUE;

    if ((err = rt_scsi_host_register(scsi)))
    {
        goto _free_irq;
    }

    return RT_EOK;

_free_irq:
    rt_hw_interrupt_mask(ufs->irq);
    rt_pic_detach_irq(ufs->irq, ufs);

_fail:
    /* Release every DMA allocation that succeeded */
    if (ufs->utrl_base)
    {
        rt_dma_free_coherent(ufs->parent.dev, ufs->utrl_size, ufs->utrl_base, ufs->utrl_handle);
        ufs->utrl_base = RT_NULL;
    }
    if (ufs->utmrl_base)
    {
        rt_dma_free_coherent(ufs->parent.dev, ufs->utmrl_size, ufs->utmrl_base, ufs->utmrl_handle);
        ufs->utmrl_base = RT_NULL;
    }
    if (ufs->ucd_base)
    {
        rt_dma_free_coherent(ufs->parent.dev, ufs->ucd_size, ufs->ucd_base, ufs->ucd_handle);
        ufs->ucd_base = RT_NULL;
    }
    if (ufs->bounce)
    {
        rt_dma_free_coherent(ufs->parent.dev, ufs->bounce_size, ufs->bounce, ufs->bounce_handle);
        ufs->bounce = RT_NULL;
    }

    return err;
}
  581. rt_err_t rt_ufs_host_unregister(struct rt_ufs_host *ufs)
  582. {
  583. rt_err_t err = RT_EOK;
  584. rt_scsi_host_unregister(&ufs->parent);
  585. if (ufs->ops->exit && (err = ufs->ops->exit(ufs)))
  586. {
  587. LOG_W("%s: UFS ops->exit failed: %s", rt_dm_dev_get_name(ufs->parent.dev), rt_strerror(err));
  588. }
  589. if (ufs->utrl_base)
  590. {
  591. rt_dma_free_coherent(ufs->parent.dev, ufs->utrl_size, ufs->utrl_base, ufs->utrl_handle);
  592. ufs->utrl_base = RT_NULL;
  593. }
  594. if (ufs->utmrl_base)
  595. {
  596. rt_dma_free_coherent(ufs->parent.dev, ufs->utmrl_size, ufs->utmrl_base, ufs->utmrl_handle);
  597. ufs->utmrl_base = RT_NULL;
  598. }
  599. if (ufs->ucd_base)
  600. {
  601. rt_dma_free_coherent(ufs->parent.dev, ufs->ucd_size, ufs->ucd_base, ufs->ucd_handle);
  602. ufs->ucd_base = RT_NULL;
  603. }
  604. if (ufs->bounce)
  605. {
  606. rt_dma_free_coherent(ufs->parent.dev, ufs->bounce_size, ufs->bounce, ufs->bounce_handle);
  607. ufs->bounce = RT_NULL;
  608. }
  609. return err;
  610. }
  611. rt_err_t rt_ufs_uic_cmd_send(struct rt_ufs_host *ufs, rt_uint32_t cmd,
  612. rt_uint32_t arg1, rt_uint32_t *arg2, rt_uint32_t arg3)
  613. {
  614. rt_uint32_t is;
  615. rt_int32_t timeout;
  616. void *regs = ufs->regs;
  617. if (!regs)
  618. {
  619. return -RT_EINVAL;
  620. }
  621. timeout = rt_tick_from_millisecond(RT_UFS_UIC_TIMEOUT_MS);
  622. timeout += rt_tick_get();
  623. while (!(HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UCRDY))
  624. {
  625. if (rt_tick_get() >= timeout)
  626. {
  627. LOG_E("%s: UFS UIC not ready", rt_dm_dev_get_name(ufs->parent.dev));
  628. return -RT_ETIMEOUT;
  629. }
  630. rt_thread_mdelay(1);
  631. }
  632. HWREG32(regs + RT_UFS_REG_UCMDARG1) = arg1;
  633. HWREG32(regs + RT_UFS_REG_UCMDARG2) = arg2 ? *arg2 : 0;
  634. HWREG32(regs + RT_UFS_REG_UCMDARG3) = arg3;
  635. HWREG32(regs + RT_UFS_REG_UICCMD) = cmd & 0xff;
  636. timeout = rt_tick_from_millisecond(RT_UFS_UIC_TIMEOUT_MS);
  637. timeout += rt_tick_get();
  638. do {
  639. is = HWREG32(regs + RT_UFS_REG_IS);
  640. if (is & RT_UFS_REG_IS_UCCS)
  641. {
  642. break;
  643. }
  644. if (is & (RT_UFS_REG_IS_UE | RT_UFS_REG_IS_DFES))
  645. {
  646. return -RT_ERROR;
  647. }
  648. rt_thread_mdelay(1);
  649. } while (rt_tick_get() < timeout);
  650. if (!(is & RT_UFS_REG_IS_UCCS))
  651. {
  652. return -RT_ETIMEOUT;
  653. }
  654. HWREG32(regs + RT_UFS_REG_IS) = RT_UFS_REG_IS_UCCS;
  655. if (arg2)
  656. {
  657. *arg2 = HWREG32(regs + RT_UFS_REG_UCMDARG2);
  658. }
  659. return RT_EOK;
  660. }