mmc_acts.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004
  1. /*
  2. * Copyright (c) 2017 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <kernel.h>
  7. #include <init.h>
  8. #include <device.h>
  9. #include <irq.h>
  10. #include <drivers/dma.h>
  11. #include <drivers/gpio.h>
  12. #include <drivers/mmc/mmc.h>
  13. #include <soc.h>
  14. #include <string.h>
  15. #include <board_cfg.h>
  16. #include <logging/log.h>
  17. LOG_MODULE_REGISTER(mmc_acts, CONFIG_LOG_DEFAULT_LEVEL);
/* Timeouts: whole-command, per-block data, and DMA drain polling. */
#define MMC_CMD_TIMEOUT_MS (2000)
#define MMC_DAT_TIMEOUT_MS (500)
#define MMC_DMA_BUFFER_BUSY_TIMEOUT_US (5000)

/* SD_EN register: bus width, FIFO width, bus master select, module enable. */
#define SD_EN_DW_SHIFT (0)
#define SD_EN_DW(x) ((x) << SD_EN_DW_SHIFT)
#define SD_EN_DW_MASK SD_EN_DW(0x3)
#define SD_EN_DW_1BIT SD_EN_DW(0x0)
#define SD_EN_DW_4BIT SD_EN_DW(0x1)
#define SD_EN_DW_8BIT SD_EN_DW(0x2) /* ONLY valid in SD0 */
#define SD_EN_SDIO BIT(3)
#define SD_EN_FIFO_WIDTH BIT(5) /* 0: 32bits 1: 8bits */
#define SD_EN_BUS_SEL_SHIFT (6)
#define SD_EN_BUS_SEL(x) ((x) << SD_EN_BUS_SEL_SHIFT)
#define SD_EN_BUS_SEL_MASK SD_EN_BUS_SEL(0x1)
#define SD_EN_BUS_SEL_AHB SD_EN_BUS_SEL(0x0)
#define SD_EN_BUS_SEL_DMA SD_EN_BUS_SEL(0x1)
#define SD_EN_ENABLE BIT(7)
#define SD_EN_CLK1 BIT(8) /* 0: clock to pad SD_CLK0; 1: clock to pad SD_CLK1 */
#define SD_EN_SHIFT (9)
#define SD_EN_CSS_MASK (1 << SD_EN_SHIFT)
#define SD_EN_CSS_LOW (0 << SD_EN_SHIFT)
#define SD_EN_CSS_HIGH (1 << SD_EN_SHIFT)
#define SD_EN_RAND_SEL BIT(30) /* 0: USE EFFUSE bits; 1: use register SDx_SEED value */
#define SD_EN_RAND_EN BIT(31) /* 0: randomizer disable; 1: randomizer enable */

/* SD_CTL register: transfer mode, start/stop, delay-chain taps. */
#define SD_CTL_TM_SHIFT (0)
#define SD_CTL_TM(x) ((x) << SD_CTL_TM_SHIFT)
#define SD_CTL_TM_MASK SD_CTL_TM(0xf)
#define SD_CTL_TM_CMD_NO_RESP SD_CTL_TM(0x0)
#define SD_CTL_TM_CMD_6B_RESP SD_CTL_TM(0x1)
#define SD_CTL_TM_CMD_17B_RESP SD_CTL_TM(0x2)
#define SD_CTL_TM_CMD_6B_RESP_BUSY SD_CTL_TM(0x3)
#define SD_CTL_TM_CMD_RESP_DATA_IN SD_CTL_TM(0x4)
#define SD_CTL_TM_CMD_RESP_DATA_OUT SD_CTL_TM(0x5)
#define SD_CTL_TM_DATA_IN SD_CTL_TM(0x6)
#define SD_CTL_TM_DATA_OUT SD_CTL_TM(0x7)
#define SD_CTL_TM_CLK_OUT_ONLY SD_CTL_TM(0x8)
#define SD_CTL_C7EN BIT(5) /* 0: enable CRC7 check; 1: disable CRC7 check */
#define SD_CTL_LBE BIT(6) /* enable last block send 8 more clocks */
#define SD_CTL_START BIT(7)
#define SD_CTL_TCN_SHIFT (8)
#define SD_CTL_TCN(x) ((x) << SD_CTL_TCN_SHIFT)
#define SD_CTL_TCN_MASK SD_CTL_TCN(0xf)
#define SD_CTL_SCC BIT(12)
#define SD_CTL_CMDLEN BIT(13) /* 0: don't drives CMD line to low; 1: drive CMD line to low */
#define SD_CTL_WDELAY_SHIFT (16)
#define SD_CTL_WDELAY(x) ((x) << SD_CTL_WDELAY_SHIFT)
#define SD_CTL_WDELAY_MASK SD_CTL_WDELAY(0xf)
#define SD_CTL_RDELAY_SHIFT (20)
#define SD_CTL_RDELAY(x) ((x) << SD_CTL_RDELAY_SHIFT)
#define SD_CTL_RDELAY_MASK SD_CTL_RDELAY(0xf)

/* SD_STATE register: error flags, FIFO status, irq enable/pending bits. */
#define SD_STATE_C7ER BIT(0)
#define SD_STATE_RC16ER BIT(1)
#define SD_STATE_WC16ER BIT(2)
#define SD_STATE_CLC BIT(3)
#define SD_STATE_CLNR BIT(4)
#define SD_STATE_TRANS_IRQ_PD BIT(5)
#define SD_STATE_TRANS_IRQ_EN BIT(6)
#define SD_STATE_DAT0S BIT(7)
#define SD_STATE_SDIO_IRQ_EN BIT(8)
#define SD_STATE_SDIO_IRQ_PD BIT(9)
#define SD_STATE_DAT1S BIT(10)
#define SD_STATE_CMDS BIT(11)
/* NOTE(review): MEMRDY and FIFO_FULL intentionally share BIT(12) — the bit's
 * meaning presumably depends on AHB vs FIFO mode; confirm against the SoC
 * reference manual. */
#define SD_STATE_MEMRDY BIT(12)
#define SD_STATE_FIFO_FULL BIT(12)
#define SD_STATE_FIFO_EMPTY BIT(13)
#define SD_STATE_FIFO_RESET BIT(14)
#define SD_STATE_TIMEOUT_ERROR BIT(15)
#define SD_STATE_ERR_MASK (SD_STATE_CLNR | SD_STATE_WC16ER | \
			   SD_STATE_RC16ER | SD_STATE_C7ER)

/* SD_CMD register: command index field. */
#define SD_CMD_MASK (0xFF)
/* SD_BLK_SIZE register */
#define SD_BLK_SIZE_MASK (0x3FF)
/* SD_BLK_NUM register */
#define SD_BLK_NUM_MASK (0xFFFF)

/* True when the register block is the SD0 instance (SD0 has extra features:
 * 8-bit bus width, selectable FIFO width). */
#define MMC_IS_SD0_DEV(x) ((uint32_t)(x) == SD0_REG_BASE)
/* mmc hardware controller register block (field order mirrors the
 * memory-mapped layout starting at SDx_REG_BASE). */
struct acts_mmc_controller {
	volatile uint32_t en;         /* SD_EN: enables, widths, bus select */
	volatile uint32_t ctl;        /* SD_CTL: transfer mode, START bit, delays */
	volatile uint32_t state;      /* SD_STATE: status, errors, irq en/pending */
	volatile uint32_t cmd;        /* SD_CMD: command index */
	volatile uint32_t arg;        /* SD_ARG: 32-bit command argument */
	volatile uint32_t rspbuf[5];  /* SD_RSPBUF0..4: raw response words */
	volatile uint32_t dat;        /* SD_DAT: data FIFO window */
	volatile uint32_t blk_size;   /* SD_BLK_SIZE: bytes per block */
	volatile uint32_t blk_num;    /* SD_BLK_NUM: block count */
};
/* Per-instance static configuration, filled in by MMC_ACTS_DEFINE_CONFIG(). */
struct acts_mmc_config {
	struct acts_mmc_controller *base;   /* register block base address */
	void (*irq_config_func)(void);      /* connects/enables the SD irq line */
	const char *dma_dev_name;           /* DMA controller device name */
	uint8_t clock_id;                   /* clock gate id for this controller */
	uint8_t reset_id;                   /* peripheral reset id */
	uint8_t clk_sel;                    /* 0 or 1 select clk0 or clk1 output pad */
	uint8_t dma_id;                     /* DMA request slot for SDx */
	uint8_t dma_chan;                   /* DMA channel requested at init */
	uint8_t flag_use_dma:1;             /* 1: DMA transfers, 0: CPU/PIO */
	uint8_t bus_width:3;                /* configured data bus width */
	uint8_t data_reg_width:3;           /* bytes moved per SD_DAT access in PIO */
	uint8_t use_irq_gpio:1;             /* 1: SDIO irq via dedicated GPIO pin */
	uint8_t sdio_irq_gpio;              /* GPIO pin number for SDIO irq */
	gpio_dt_flags_t sdio_sdio_gpio_flags; /* extra flags for that pin */
	char *sdio_irq_gpio_name;           /* GPIO controller device name */
};
/* Per-instance runtime state. */
struct acts_mmc_data {
	struct k_sem trans_done;            /* signalled by ISR on transfer done/error */
	const struct device *dma_dev;       /* bound DMA controller */
	uint32_t capability;                /* MMC_CAP_* mask assembled at init */
	uint8_t rdelay;                     /* read delay-chain tap (see set_clock) */
	uint8_t wdelay;                     /* write delay-chain tap */
	uint8_t dma_chan;                   /* channel returned by dma_request() */
	uint8_t reserved[1];
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
	struct k_sem dma_sync;              /* signalled by DMA completion callback */
#endif
	void (*sdio_irq_cbk)(void *arg);    /* client SDIO irq callback */
	void *sdio_irq_cbk_arg;
	const struct device *sdio_irq_gpio_dev;
	struct gpio_callback sdio_irq_gpio_cb;
	uint8_t device_release_flag : 1; /* device has been released flag such as sd card pluged out */
};
  144. #if IS_ENABLED(CONFIG_MMC_0)||IS_ENABLED(CONFIG_MMC_1)
#if (CONFIG_MMC_ACTS_ERROR_DETAIL == 1)
/* Dump every controller register (plus the SD clock-divider registers when
 * present) for post-error diagnostics; called from mmc_acts_send_cmd(). */
static void mmc_acts_dump_regs(struct acts_mmc_controller *mmc)
{
	LOG_INF( "** mmc contoller register ** \n");
	LOG_INF(" BASE: %08x\n", (uint32_t)mmc);
	LOG_INF(" SD_EN: %08x\n", mmc->en);
	LOG_INF(" SD_CTL: %08x\n", mmc->ctl);
	LOG_INF(" SD_STATE: %08x\n", mmc->state);
	LOG_INF(" SD_CMD: %08x\n", mmc->cmd);
	LOG_INF(" SD_ARG: %08x\n", mmc->arg);
	LOG_INF(" SD_RSPBUF0: %08x\n", mmc->rspbuf[0]);
	LOG_INF(" SD_RSPBUF1: %08x\n", mmc->rspbuf[1]);
	LOG_INF(" SD_RSPBUF2: %08x\n", mmc->rspbuf[2]);
	LOG_INF(" SD_RSPBUF3: %08x\n", mmc->rspbuf[3]);
	LOG_INF(" SD_RSPBUF4: %08x\n", mmc->rspbuf[4]);
	LOG_INF("SD_BLK_SIZE: %08x\n", mmc->blk_size);
	LOG_INF(" SD_BLK_NUM: %08x\n", mmc->blk_num);
#ifdef CMU_SD0CLK
	LOG_INF(" CMU_SD0CLK: %08x\n", sys_read32(CMU_SD0CLK));
#endif
#ifdef CMU_SD1CLK
	LOG_INF(" CMU_SD1CLK: %08x\n", sys_read32(CMU_SD1CLK));
#endif
}
#endif
  170. static int mmc_acts_check_err(const struct acts_mmc_config *cfg,
  171. struct acts_mmc_controller *mmc,
  172. uint32_t resp_err_mask)
  173. {
  174. uint32_t state = mmc->state & resp_err_mask;
  175. if (!(state & SD_STATE_ERR_MASK))
  176. return 0;
  177. if (state & SD_STATE_CLNR)
  178. LOG_ERR("Command No response");
  179. if (state & SD_STATE_C7ER)
  180. LOG_ERR("CRC command response Error");
  181. if (state & SD_STATE_RC16ER)
  182. LOG_ERR("CRC Read data Error");
  183. if (state & SD_STATE_WC16ER)
  184. LOG_ERR("CRC Write data Error");
  185. return 1;
  186. }
/*
 * Recover the controller after an error: save SD_EN/SD_STATE, pulse the
 * peripheral reset, then restore the saved values so bus width, clock pad
 * selection and irq-enable configuration survive the reset.
 */
static void mmc_acts_err_reset(const struct acts_mmc_config *cfg,
			       struct acts_mmc_controller *mmc)
{
	uint32_t en_bak, state_bak;

	/* snapshot configuration registers before resetting the block */
	en_bak = mmc->en;
	state_bak = mmc->state;

	/* reset mmc controller */
	acts_reset_peripheral(cfg->reset_id);

	/* restore pre-reset configuration */
	mmc->en = en_bak;
	mmc->state = state_bak;
}
  198. static void mmc_acts_trans_irq_setup(struct acts_mmc_controller *mmc,
  199. bool enable)
  200. {
  201. uint32_t key, state;
  202. key = irq_lock();
  203. state = mmc->state;
  204. /* don't clear sdio pending */
  205. state &= ~SD_STATE_SDIO_IRQ_PD;
  206. if (enable)
  207. state |= SD_STATE_TRANS_IRQ_EN;
  208. else
  209. state &= ~SD_STATE_TRANS_IRQ_EN;
  210. mmc->state = state;
  211. irq_unlock(key);
  212. }
  213. static void mmc_acts_sdio_irq_setup(struct acts_mmc_controller *mmc,
  214. bool enable)
  215. {
  216. uint32_t key, state;
  217. key = irq_lock();
  218. state = mmc->state;
  219. /* don't clear transfer irq pending */
  220. state &= ~SD_STATE_TRANS_IRQ_PD;
  221. if (enable)
  222. state |= SD_STATE_SDIO_IRQ_EN;
  223. else
  224. state &= ~SD_STATE_SDIO_IRQ_EN;
  225. mmc->state = state;
  226. irq_unlock(key);
  227. }
/*
 * MMC interrupt service routine.
 *
 * Wakes the thread blocked in mmc_acts_transfer_by_dma() when the transfer
 * irq fires (or an error bit is raised), and forwards in-band SDIO card
 * interrupts to the registered client callback.
 */
static void mmc_acts_isr(void *arg)
{
	struct device *dev = (struct device *)arg;
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	uint32_t state;

	state = mmc->state;
	LOG_DBG("enter isr: state 0x%x", state);

	/* transfer done or error while transfer irq is enabled */
	if ((state & SD_STATE_TRANS_IRQ_EN) &&
	    ((state & SD_STATE_TRANS_IRQ_PD) ||
	     (state & SD_STATE_ERR_MASK))) {
		k_sem_give(&data->trans_done);
	}

	/* in-band SDIO card interrupt */
	if ((state & SD_STATE_SDIO_IRQ_EN) &&
	    (state & SD_STATE_SDIO_IRQ_PD)) {
		if (data->sdio_irq_cbk) {
			data->sdio_irq_cbk(data->sdio_irq_cbk_arg);
		}
	}

	/* clear irq pending, keep the error bits */
	/* NOTE(review): pending bits appear to be write-1-to-clear; only the
	 * enable+pending bits are written back so error flags stay readable
	 * by mmc_acts_check_err() — confirm against the SoC manual. */
	mmc->state = state & (SD_STATE_TRANS_IRQ_EN | SD_STATE_SDIO_IRQ_EN |
			      SD_STATE_TRANS_IRQ_PD | SD_STATE_SDIO_IRQ_PD);
}
  252. static int mmc_acts_get_trans_mode(struct mmc_cmd *cmd, uint32_t *trans_mode,
  253. uint32_t *rsp_err_mask)
  254. {
  255. uint32_t mode =0, err_mask = 0;
  256. switch (mmc_resp_type(cmd)) {
  257. case MMC_RSP_NONE:
  258. mode = SD_CTL_TM_CMD_NO_RESP;
  259. break;
  260. case MMC_RSP_R1:
  261. if (cmd->buf) {
  262. if (cmd->flags & MMC_DATA_READ)
  263. mode = SD_CTL_TM_CMD_RESP_DATA_IN;
  264. else if (cmd->flags & MMC_DATA_WRITE)
  265. mode = SD_CTL_TM_CMD_RESP_DATA_OUT;
  266. else if (cmd->flags & MMC_DATA_WRITE_DIRECT)
  267. mode = SD_CTL_TM_DATA_OUT;
  268. else if (cmd->flags & MMC_DATA_READ_DIRECT)
  269. mode = SD_CTL_TM_DATA_IN;
  270. } else {
  271. mode = SD_CTL_TM_CMD_6B_RESP;
  272. }
  273. err_mask = SD_STATE_CLNR | SD_STATE_C7ER | SD_STATE_RC16ER |
  274. SD_STATE_WC16ER;
  275. break;
  276. case MMC_RSP_R1B:
  277. mode = SD_CTL_TM_CMD_6B_RESP_BUSY;
  278. err_mask = SD_STATE_CLNR | SD_STATE_C7ER;
  279. break;
  280. case MMC_RSP_R2:
  281. mode = SD_CTL_TM_CMD_17B_RESP;
  282. err_mask = SD_STATE_CLNR | SD_STATE_C7ER;
  283. break;
  284. case MMC_RSP_R3:
  285. mode = SD_CTL_TM_CMD_6B_RESP;
  286. err_mask = SD_STATE_CLNR;
  287. break;
  288. default:
  289. LOG_ERR("unsupported RSP 0x%x\n", mmc_resp_type(cmd));
  290. return -ENOTSUP;
  291. }
  292. if (trans_mode)
  293. *trans_mode = mode;
  294. if (rsp_err_mask)
  295. *rsp_err_mask = err_mask;
  296. return 0;
  297. }
  298. static int mmc_acts_wait_cmd(struct acts_mmc_controller *mmc, int timeout_ms)
  299. {
  300. uint32_t start_time, curr_time;
  301. start_time = k_cycle_get_32();
  302. while (mmc->ctl & SD_CTL_START) {
  303. curr_time = k_cycle_get_32();
  304. if (k_cyc_to_us_floor32(curr_time - start_time)
  305. >= (timeout_ms * 1000)) {
  306. LOG_ERR("mmc cmd timeout");
  307. return -ETIMEDOUT;
  308. }
  309. }
  310. #if (CONFIG_MMC_WAIT_DAT1_BUSY == 1)
  311. start_time = k_cycle_get_32();
  312. while ((mmc->state & SD_STATE_DAT1S) == 0) {
  313. curr_time = k_cycle_get_32();
  314. if (k_cyc_to_us_floor32(curr_time - start_time)
  315. >= (timeout_ms * 1000)) {
  316. LOG_ERR("mmc dat1 timeout");
  317. return -ETIMEDOUT;
  318. }
  319. }
  320. #endif
  321. return 0;
  322. }
  323. static bool mmc_acts_data_is_ready(struct acts_mmc_controller *mmc,
  324. bool is_write, bool use_fifo)
  325. {
  326. uint32_t state = mmc->state;
  327. if (use_fifo)
  328. if (is_write)
  329. return (!(state & SD_STATE_FIFO_FULL));
  330. else
  331. return (!(state & SD_STATE_FIFO_EMPTY));
  332. else
  333. return (state & SD_STATE_MEMRDY);
  334. }
/*
 * PIO data transfer: feed/drain the SD_DAT register from the CPU.
 *
 * Moves cfg->data_reg_width bytes per SD_DAT access; a misaligned buffer or
 * a short tail goes through memcpy() instead of a direct 32-bit access.
 *
 * @param dev MMC controller device.
 * @param is_write true to send data to the card, false to receive.
 * @param buf Data buffer (any alignment).
 * @param len Total byte count.
 * @param timeout_ms Overall transfer deadline.
 * @return 0 on success, -EIO on controller error, -ETIMEDOUT on expiry.
 */
static int mmc_acts_transfer_by_cpu(const struct device *dev,
				    bool is_write, uint8_t *buf,
				    int len, uint32_t timeout_ms)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_controller *mmc = cfg->base;
	uint32_t start_time, curr_time;
	uint32_t data, data_len;

	/* kick the controller state machine */
	mmc->ctl |= SD_CTL_START;

	start_time = k_cycle_get_32();
	while (len > 0) {
		/* bail out early if the card stopped responding */
		if (mmc_acts_check_err(cfg, mmc, SD_STATE_CLNR)) {
			return -EIO;
		}

		if (mmc_acts_data_is_ready(mmc, is_write, CONFIG_MMC_STATE_FIFO)) {
			data_len = len < cfg->data_reg_width ? len : cfg->data_reg_width;
			if (is_write) {
				/* NOTE(review): for a partial tail the upper
				 * bytes of 'data' are uninitialized; the
				 * controller presumably ignores them — confirm. */
				if (((uint32_t)buf & 0x3) || data_len < cfg->data_reg_width)
					memcpy(&data, buf, data_len);
				else
					data = *((uint32_t *)buf);
				mmc->dat = data;
			} else {
				data = mmc->dat;
				if (((uint32_t)buf & 0x3) || data_len < cfg->data_reg_width)
					memcpy(buf, &data, data_len);
				else
					*((uint32_t *)buf) = data;
			}
			buf += data_len;
			len -= data_len;
		}

		curr_time = k_cycle_get_32();
		if (k_cyc_to_us_floor32(curr_time - start_time)
		    >= (timeout_ms * 1000)) {
			LOG_ERR("mmc io timeout, is_write %d", is_write);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
  376. #if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
#define DMA_IRQ_TC (0) /* DMA completion flag */
#define DMA_IRQ_HF (1) /* DMA half-full flag */

/*
 * DMA completion callback (interrupt context): releases the dma_sync
 * semaphore that mmc_acts_transfer_by_dma() blocks on. Half-transfer
 * notifications are ignored.
 */
static void dma_done_callback(const struct device *dev, void *callback_data, uint32_t ch , int type)
{
	struct acts_mmc_data *data = (struct acts_mmc_data *)callback_data;

	ARG_UNUSED(dev);
	ARG_UNUSED(ch);

	if (type != DMA_IRQ_TC)
		return;

	LOG_DBG("mmc dma transfer is done:0x%x\n", (u32_t)data);
	k_sem_give(&data->dma_sync);
}
  389. #else
  390. static int mmc_dma_wait_timeout(const struct device *dma_dev, uint32_t dma_chan, uint32_t timeout_us)
  391. {
  392. uint32_t start_time, curr_time;
  393. struct dma_status stat = {0};
  394. int ret;
  395. start_time = k_cycle_get_32();
  396. while (1) {
  397. ret = dma_get_status(dma_dev, dma_chan, &stat);
  398. if (ret) {
  399. LOG_ERR("get dma(%d) status error %d\n", dma_chan, ret);
  400. return -EFAULT;
  401. }
  402. /* DMA transfer finish */
  403. if (!stat.pending_length)
  404. break;
  405. curr_time = k_cycle_get_32();
  406. if (k_cyc_to_us_floor32(curr_time - start_time) >= timeout_us) {
  407. LOG_ERR("wait mmc dma(%d) finish timeout", dma_chan);
  408. return -ETIMEDOUT;
  409. }
  410. }
  411. return 0;
  412. }
  413. #endif
/*
 * DMA data transfer between memory and the SD_DAT register.
 *
 * Configures a single-block DMA job, switches the controller onto the DMA
 * bus, starts the state machine, and blocks on trans_done (given by the ISR).
 * Completion is then double-checked either via the DMA callback semaphore
 * (CONFIG_MMC_YIELD_WAIT_DMA_DONE) or by polling the channel, followed by a
 * wait for the controller to go idle.
 *
 * @return 0 on success; -ENXIO if the device was released mid-transfer;
 *         -1 on DMA setup failure; negative errno on timeout.
 */
static int mmc_acts_transfer_by_dma(const struct device *dev,
				    bool is_write, uint8_t *buf,
				    int len, uint32_t timeout_ms)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_block_cfg = {0};
	int err;

	/* cleared here, set by mmc_acts_release_device() on e.g. card unplug */
	data->device_release_flag = 0;

#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
	dma_cfg.dma_callback = dma_done_callback;
	dma_cfg.user_data = data;
	dma_cfg.complete_callback_en = 1;
#endif
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_block_cfg;
	dma_block_cfg.block_size = len;

	/* SD0 FIFO width can select 8bits and 32bits */
	if (MMC_IS_SD0_DEV(mmc)) {
		if (mmc->en & SD_EN_FIFO_WIDTH)
			dma_cfg.dest_data_size = 1; /* fifo width is 8bit */
		else
			dma_cfg.dest_data_size = 4; /* fifo width is 32bit */
	} else {
		/* SD1 FIFO width is fixed 8bits */
		dma_cfg.dest_data_size = 1;
	}

	if (is_write) {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_block_cfg.source_address = (uint32_t)buf;
		dma_block_cfg.dest_address = (uint32_t)&mmc->dat;
	} else {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_block_cfg.source_address = (uint32_t)&mmc->dat;
		dma_block_cfg.dest_address = (uint32_t)buf;
	}

	if (dma_config(data->dma_dev, data->dma_chan, &dma_cfg)) {
		LOG_ERR("dma%d config error\n", data->dma_chan);
		return -1;
	}
	if (dma_start(data->dma_dev, data->dma_chan)) {
		LOG_ERR("dma%d start error\n", data->dma_chan);
		return -1;
	}

	/* route the controller data path onto the DMA bus */
	mmc->en |= SD_EN_BUS_SEL_DMA;
	mmc_acts_trans_irq_setup(mmc, true);

	/* start mmc controller state machine */
	mmc->ctl |= SD_CTL_START;

	/* wait until data transfer is done */
	err = k_sem_take(&data->trans_done, K_MSEC(timeout_ms));

	if (data->device_release_flag) {
		err = -ENXIO;
	} else {
		if(!err) {
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
			err = k_sem_take(&data->dma_sync, K_MSEC(timeout_ms));
#else
			err = mmc_dma_wait_timeout(data->dma_dev, data->dma_chan,
						   MMC_DMA_BUFFER_BUSY_TIMEOUT_US);
#endif
		}
		if (!err) {
			/* wait controller idle */
			err = mmc_acts_wait_cmd(mmc, timeout_ms);
		}
	}

	/* unconditional teardown: irq off, back to AHB bus, stop the channel */
	mmc_acts_trans_irq_setup(mmc, false);
	mmc->en &= ~SD_EN_BUS_SEL_DMA;
	dma_stop(data->dma_dev, data->dma_chan);

	return err;
}
/*
 * Issue one MMC/SD command, optionally with a data phase, and collect the
 * response. This is the driver's main entry point (mmc_driver_api.send_cmd).
 *
 * @param dev MMC controller device.
 * @param cmd Command descriptor; resp[] is filled for response-bearing
 *            commands without a data buffer.
 * @return 0 on success, -EIO on controller/card error (after reset),
 *         other negative errno from mode lookup or the transfer path.
 */
static int mmc_acts_send_cmd(const struct device *dev, struct mmc_cmd *cmd)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	int is_write = cmd->flags & (MMC_DATA_WRITE | MMC_DATA_WRITE_DIRECT);
	uint32_t ctl, rsp_err_mask, len, trans_mode;
	int err, timeout;

	LOG_DBG("CMD%02d: arg 0x%x flags 0x%x is_write %d \n",
		cmd->opcode, cmd->arg, cmd->flags, !!is_write);
	LOG_DBG(" blk_num 0x%x blk_size 0x%x, buf %p \n",
		cmd->blk_num, cmd->blk_size, cmd->buf);

	trans_mode = 0;
	err = mmc_acts_get_trans_mode(cmd, &trans_mode, &rsp_err_mask);
	if (err) {
		return err;
	}

	/* transfer mode plus the delay-chain taps chosen in set_clock() */
	ctl = trans_mode | SD_CTL_RDELAY(data->rdelay) |
	      SD_CTL_WDELAY(data->wdelay);
	if (cmd->buf)
		ctl |= SD_CTL_LBE;

	/* sdio wifi need continues clock */
	if (data->capability & MMC_CAP_SDIO_IRQ)
		ctl |= SD_CTL_SCC;

#if (CONFIG_MMC_SD0_FIFO_WIDTH_8BITS != 1)
	/* SD0: fall back to the 8-bit FIFO when the buffer is unaligned,
	 * since the 32-bit FIFO requires 4-byte-aligned memory addresses */
	if (MMC_IS_SD0_DEV(mmc)){
		if(((u32_t)cmd->buf & 0x3))
			mmc->en |= SD_EN_FIFO_WIDTH;
		else
			mmc->en &= ~SD_EN_FIFO_WIDTH;
	}
#endif

	/* program the command before starting the state machine */
	mmc->ctl = ctl;
	mmc->arg = cmd->arg;
	mmc->cmd = cmd->opcode;
	mmc->blk_num = cmd->blk_num;
	mmc->blk_size = cmd->blk_size;

	if (!cmd->buf) {
		/* only command need to transfer */
		mmc->ctl |= SD_CTL_START;
	} else {
		/* command with data transfer */
		len = cmd->blk_num * cmd->blk_size;
		timeout = cmd->blk_num * MMC_DAT_TIMEOUT_MS;
		/* When SD0 FIFO and DMA width is 32bits, data address in memory shall align by 32bits */
		if (!cfg->flag_use_dma) {
			err = mmc_acts_transfer_by_cpu(dev, is_write,
						       cmd->buf, len, timeout);
		} else {
			err = mmc_acts_transfer_by_dma(dev, is_write,
						       cmd->buf, len, timeout);
		}
	}

	/* OR-combined: any non-zero result collapses into the -EIO path below */
	err |= mmc_acts_wait_cmd(mmc, MMC_CMD_TIMEOUT_MS);
	err |= mmc_acts_check_err(cfg, mmc, rsp_err_mask);
	if (err) {
		/*
		 * FIXME: the operation of detecting card by polling maybe
		 * output no reponse error message periodically. So filter
		 * it out by config.
		 */
#if (CONFIG_MMC_ACTS_ERROR_DETAIL == 1)
		LOG_ERR("send cmd%d error, state 0x%x \n",
			cmd->opcode, mmc->state);
		mmc_acts_dump_regs(mmc);
#endif
		mmc_acts_err_reset(cfg, mmc);
		return -EIO;
	}

	/* process responses */
	if (!cmd->buf && (cmd->flags & MMC_RSP_PRESENT)) {
		if (cmd->flags & MMC_RSP_136) {
			/* MSB first */
			cmd->resp[3] = mmc->rspbuf[0];
			cmd->resp[2] = mmc->rspbuf[1];
			cmd->resp[1] = mmc->rspbuf[2];
			cmd->resp[0] = mmc->rspbuf[3];
		} else {
			/* 48-bit response: reassemble from the raw shifted
			 * rspbuf words (hardware packs the response across
			 * rspbuf[0]/rspbuf[1]) */
			cmd->resp[0] = (mmc->rspbuf[1] << 24) |
				       (mmc->rspbuf[0] >> 8);
			cmd->resp[1] = (mmc->rspbuf[1] << 24) >> 8;
		}
	}

	return 0;
}
  574. static int mmc_acts_set_sdio_irq_cbk(const struct device *dev,
  575. sdio_irq_callback_t callback,
  576. void *arg)
  577. {
  578. struct acts_mmc_data *data = dev->data;
  579. if (!(data->capability & MMC_CAP_SDIO_IRQ))
  580. return -ENOTSUP;
  581. data->sdio_irq_cbk = callback;
  582. data->sdio_irq_cbk_arg = arg;
  583. return 0;
  584. }
  585. static void sdio_irq_gpio_callback(const struct device *port,
  586. struct gpio_callback *cb,
  587. uint32_t pins)
  588. {
  589. struct acts_mmc_data *data =
  590. CONTAINER_OF(cb, struct acts_mmc_data, sdio_irq_gpio_cb);
  591. ARG_UNUSED(pins);
  592. if (data->sdio_irq_cbk) {
  593. data->sdio_irq_cbk(data->sdio_irq_cbk_arg);
  594. }
  595. }
  596. static void mmc_acts_sdio_irq_gpio_setup(const struct device *dev, bool enable)
  597. {
  598. const struct acts_mmc_config *cfg = dev->config;
  599. struct acts_mmc_data *data = dev->data;
  600. if (enable)
  601. gpio_pin_interrupt_configure(data->sdio_irq_gpio_dev,
  602. cfg->sdio_irq_gpio, GPIO_INT_EDGE_TO_ACTIVE);
  603. else
  604. gpio_pin_interrupt_configure(data->sdio_irq_gpio_dev,
  605. cfg->sdio_irq_gpio, GPIO_INT_EDGE_TO_INACTIVE);
  606. }
/*
 * Enable or disable SDIO card interrupt delivery.
 *
 * In 4-bit mode the controller's in-band DAT1 interrupt is used; in 1-bit
 * mode the dedicated GPIO is used when configured, otherwise an error is
 * logged (but 0 is still returned).
 */
static int mmc_acts_enable_sdio_irq(const struct device *dev, bool enable)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;

	if (!(data->capability & MMC_CAP_SDIO_IRQ))
		return -ENOTSUP;

	if (data->capability & MMC_CAP_4_BIT_DATA) {
		mmc_acts_sdio_irq_setup(mmc, enable);
		/* NOTE(review): SD_EN_SDIO is set even when enable == false —
		 * confirm whether the disable path should clear it instead. */
		mmc->en |= SD_EN_SDIO;
	} else {
		if(cfg->use_irq_gpio){
			mmc_acts_sdio_irq_gpio_setup(dev, enable);
		} else {
			LOG_ERR("enable_sdio_irq fail\n");
		}
	}

	return 0;
}
  626. static int mmc_acts_set_bus_width(const struct device *dev, unsigned int bus_width)
  627. {
  628. const struct acts_mmc_config *cfg = dev->config;
  629. struct acts_mmc_controller *mmc = cfg->base;
  630. LOG_DBG("bus_width=%d\n", bus_width);
  631. if (bus_width == MMC_BUS_WIDTH_1) {
  632. mmc->en &= ~SD_EN_DW_MASK;
  633. mmc->en |= SD_EN_DW_1BIT;
  634. } else if (bus_width == MMC_BUS_WIDTH_4) {
  635. mmc->en &= ~SD_EN_DW_MASK;
  636. mmc->en |= SD_EN_DW_4BIT;
  637. } else {
  638. return -EINVAL;
  639. }
  640. return 0;
  641. }
  642. static int mmc_acts_set_clock(const struct device *dev, unsigned int rate_hz)
  643. {
  644. const struct acts_mmc_config *cfg = dev->config;
  645. struct acts_mmc_data *data = dev->data;
  646. uint32_t rdelay, wdelay;
  647. /*
  648. * Set the RDELAY and WDELAY based on the sd clk.
  649. */
  650. if (rate_hz < 200000) {
  651. rdelay = 0x8;
  652. wdelay = 0xf;
  653. } else if (rate_hz <= 15000000) {
  654. rdelay = 0x8;
  655. wdelay = 0xa;
  656. } else if (rate_hz <= 30000000) {
  657. rdelay = 0x8;
  658. wdelay = 0x9;
  659. } else {
  660. rdelay = 0x8;
  661. wdelay = 0x8;
  662. }
  663. clk_set_rate(cfg->clock_id, rate_hz);
  664. /* config delay chain */
  665. data->rdelay = rdelay;
  666. data->wdelay = wdelay;
  667. return 0;
  668. }
  669. static uint32_t mmc_acts_get_capability(const struct device *dev)
  670. {
  671. struct acts_mmc_data *data = dev->data;
  672. return data->capability;
  673. }
/*
 * Abort an in-flight transfer when the device goes away (e.g. SD card
 * plugged out): sets device_release_flag so that mmc_acts_transfer_by_dma()
 * reports -ENXIO, then wakes the blocked waiter.
 */
static int mmc_acts_release_device(const struct device *dev)
{
	struct acts_mmc_data *data = dev->data;
	u32_t key = irq_lock();
	data->device_release_flag = 1;
	/* wake a waiter blocked on trans_done, then zero the count so a
	 * future transfer does not consume a stale completion.
	 * NOTE(review): if nothing is blocked, the give is swallowed by the
	 * reset — the flag alone carries the abort; confirm intended. */
	k_sem_give(&data->trans_done);
	k_sem_reset(&data->trans_done);
	irq_unlock(key);
	return 0;
}
/* Driver API vtable exposed through DEVICE_DEFINE() below. */
static const struct mmc_driver_api mmc_acts_driver_api = {
	.get_capability = mmc_acts_get_capability,
	.set_clock = mmc_acts_set_clock,
	.set_bus_width = mmc_acts_set_bus_width,
	.send_cmd = mmc_acts_send_cmd,
	.set_sdio_irq_callback = mmc_acts_set_sdio_irq_cbk,
	.enable_sdio_irq = mmc_acts_enable_sdio_irq,
	.release_device = mmc_acts_release_device,
};
/*
 * One-time driver init: bind/request the DMA channel (if configured), set up
 * the optional SDIO irq GPIO, clock+reset the controller, program a 100 kHz
 * identification clock, enable the block, and assemble the capability mask.
 *
 * @return 0 on success, -ENODEV/-EINVAL on missing devices or GPIO setup
 *         failure.
 */
static int mmc_acts_init(const struct device *dev)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	int chan;

	LOG_INF("mmc_acts_init\n");

	/* bind the DMA controller and reserve a channel */
	if(cfg->flag_use_dma) {
		data->dma_dev = device_get_binding(cfg->dma_dev_name);
		if (!data->dma_dev) {
			LOG_ERR("cannot found dma device\n");
			return -ENODEV;
		}
		chan = dma_request(data->dma_dev, cfg->dma_chan);
		if(chan < 0){
			LOG_ERR("request dma chan config err chan=%d\n", cfg->dma_chan);
			return -ENODEV;
		}
		data->dma_chan = chan;
		LOG_INF("use dma=%d\n", chan);
	}

	/* optional out-of-band SDIO interrupt pin */
	if (cfg->use_irq_gpio) {
		data->sdio_irq_gpio_dev = device_get_binding(cfg->sdio_irq_gpio_name);
		if (!data->sdio_irq_gpio_dev) {
			LOG_ERR("cannot found sdio irq gpio dev device %s",
				cfg->sdio_irq_gpio_name);
			return -ENODEV;
		}
		/* Configure IRQ pin and the IRQ call-back/handler */
		gpio_pin_configure(data->sdio_irq_gpio_dev,
				   cfg->sdio_irq_gpio,
				   GPIO_INPUT | GPIO_INT_EDGE_TO_INACTIVE | cfg->sdio_sdio_gpio_flags);
		gpio_init_callback(&data->sdio_irq_gpio_cb,
				   sdio_irq_gpio_callback,
				   BIT(cfg->sdio_irq_gpio));
		if (gpio_add_callback(data->sdio_irq_gpio_dev,
				      &data->sdio_irq_gpio_cb)) {
			LOG_ERR("add sdio irq fun fail dev device %s",
				cfg->sdio_irq_gpio_name);
			return -EINVAL;
		}
	}

	/* enable mmc controller clock */
	acts_clock_peripheral_enable(cfg->clock_id);

	/* reset mmc controller */
	acts_reset_peripheral(cfg->reset_id);

	/* set initial clock: 100 kHz for the card identification phase */
	mmc_acts_set_clock((struct device *)dev, 100000);

	/* enable mmc controller, routing the clock to the selected pad */
	if(cfg->clk_sel)
		mmc->en |= SD_EN_ENABLE | SD_EN_CLK1;
	else
		mmc->en |= SD_EN_ENABLE;

#if (CONFIG_MMC_SD0_FIFO_WIDTH_8BITS == 1)
	if (MMC_IS_SD0_DEV(mmc))
		mmc->en |= SD_EN_FIFO_WIDTH;
#endif

	k_sem_init(&data->trans_done, 0, 1);
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
	k_sem_init(&data->dma_sync, 0, 1);
#endif

	/* capability mask per instance, from Kconfig */
	if (MMC_IS_SD0_DEV(mmc)) {
#if (CONFIG_MMC_0_BUS_WIDTH == 4)
		data->capability = (MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED);
#else
		data->capability = MMC_CAP_SD_HIGHSPEED;
#endif
#if (CONFIG_MMC_0_ENABLE_SDIO_IRQ == 1)
		data->capability |= MMC_CAP_SDIO_IRQ;
#endif
	} else {
#if (CONFIG_MMC_1_BUS_WIDTH == 4)
		data->capability = (MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED);
#else
		data->capability = MMC_CAP_SD_HIGHSPEED;
#endif
#if (CONFIG_MMC_1_ENABLE_SDIO_IRQ == 1)
		data->capability |= MMC_CAP_SDIO_IRQ;
#endif
	}

	/* connect and enable the controller interrupt last */
	cfg->irq_config_func();

	return 0;
}
/*
 * Config-struct fragments selected by COND_CODE_1() in
 * MMC_ACTS_DEFINE_CONFIG(). Each fragment is wrapped in parentheses because
 * COND_CODE_1() strips one level of parentheses from the chosen branch,
 * leaving bare designated initializers inside the struct initializer.
 */
#define dma_use(n) (\
.dma_dev_name = CONFIG_DMA_0_NAME, \
.dma_id = CONFIG_MMC_##n##_DMA_ID,\
.dma_chan = CONFIG_MMC_##n##_DMA_CHAN,\
.flag_use_dma = 1, \
)
#define dma_not(n) (\
.flag_use_dma = 0, \
)
#define gpio_irq_use(n) (\
.sdio_irq_gpio_name = CONFIG_MMC_##n##_GPIO_IRQ_DEV, \
.sdio_irq_gpio = CONFIG_MMC_##n##_GPIO_IRQ_NUM,\
.sdio_sdio_gpio_flags = CONFIG_MMC_##n##_GPIO_IRQ_FLAG,\
.use_irq_gpio = 1, \
)
#define gpio_irq_not(n) (\
.use_irq_gpio = 0, \
)
/*
 * Per-instance boilerplate: forward-declare the device, define the irq
 * connect function, and build the const config struct from Kconfig, with
 * the DMA and GPIO-irq fragments chosen by COND_CODE_1().
 */
#define MMC_ACTS_DEFINE_CONFIG(n) \
	static const struct device DEVICE_NAME_GET(mmc##n##_acts); \
	static void mmc##n##_acts_irq_config(void) \
	{ \
		IRQ_CONNECT(IRQ_ID_SD##n, CONFIG_MMC_##n##_IRQ_PRI, \
			    mmc_acts_isr, \
			    DEVICE_GET(mmc##n##_acts), 0); \
		irq_enable(IRQ_ID_SD##n); \
	} \
	static const struct acts_mmc_config mmc_acts_config_##n = { \
		.base = (struct acts_mmc_controller *)SD##n##_REG_BASE,\
		.irq_config_func = mmc##n##_acts_irq_config, \
		.clock_id = CLOCK_ID_SD##n,\
		.reset_id = RESET_ID_SD##n,\
		.clk_sel = CONFIG_MMC_##n##_CLKSEL,\
		.bus_width = CONFIG_MMC_##n##_BUS_WIDTH,\
		.data_reg_width = CONFIG_MMC_##n##_DATA_REG_WIDTH,\
		COND_CODE_1(CONFIG_MMC_##n##_USE_DMA, dma_use(n), dma_not(n))\
		COND_CODE_1(CONFIG_MMC_##n##_USE_GPIO_IRQ, gpio_irq_use(n), gpio_irq_not(n))\
	}
/* Instantiate one driver instance: config, mutable data, and the device
 * object registered at POST_KERNEL priority 20. */
#define MMC_ACTS_DEVICE_INIT(n) \
	MMC_ACTS_DEFINE_CONFIG(n); \
	static struct acts_mmc_data mmc_acts_dev_data_##n ; \
	DEVICE_DEFINE(mmc##n##_acts, \
		      CONFIG_MMC_##n##_NAME, \
		      &mmc_acts_init, NULL, &mmc_acts_dev_data_##n, \
		      &mmc_acts_config_##n, POST_KERNEL, \
		      20, &mmc_acts_driver_api);

#if IS_ENABLED(CONFIG_MMC_0)
MMC_ACTS_DEVICE_INIT(0)
#endif

#if IS_ENABLED(CONFIG_MMC_1)
MMC_ACTS_DEVICE_INIT(1)
#endif
  829. #endif //#if IS_ENABLED(CONFIG_MMC_0)||IS_ENABLED(CONFIG_MMC_1)