mmc_acts.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141
  1. /*
  2. * Copyright (c) 2017 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <kernel.h>
  7. #include <init.h>
  8. #include <device.h>
  9. #include <irq.h>
  10. #include <drivers/dma.h>
  11. #include <drivers/gpio.h>
  12. #include <drivers/mmc/mmc.h>
  13. #include <soc.h>
  14. #include <string.h>
  15. #include <board_cfg.h>
  16. #include <logging/log.h>
  17. LOG_MODULE_REGISTER(mmc_acts, CONFIG_LOG_DEFAULT_LEVEL);
  18. /* timeout define */
  19. #define MMC_CMD_TIMEOUT_MS (2000)
  20. #define MMC_DAT_TIMEOUT_MS (500)
  21. #define MMC_DMA_BUFFER_BUSY_TIMEOUT_US (5000)
  22. /* SD_EN register */
  23. #define SD_EN_DW_SHIFT (0)
  24. #define SD_EN_DW(x) ((x) << SD_EN_DW_SHIFT)
  25. #define SD_EN_DW_MASK SD_EN_DW(0x3)
  26. #define SD_EN_DW_1BIT SD_EN_DW(0x0)
  27. #define SD_EN_DW_4BIT SD_EN_DW(0x1)
  28. #define SD_EN_DW_8BIT SD_EN_DW(0x2) /* ONLY valid in SD0 */
  29. #define SD_EN_SDIO BIT(3)
  30. #define SD_EN_FIFO_WIDTH BIT(5) /* 0: 32bits 1: 8bits */
  31. #define SD_EN_BUS_SEL_SHIFT (6)
  32. #define SD_EN_BUS_SEL(x) ((x) << SD_EN_BUS_SEL_SHIFT)
  33. #define SD_EN_BUS_SEL_MASK SD_EN_BUS_SEL(0x1)
  34. #define SD_EN_BUS_SEL_AHB SD_EN_BUS_SEL(0x0)
  35. #define SD_EN_BUS_SEL_DMA SD_EN_BUS_SEL(0x1)
  36. #define SD_EN_ENABLE BIT(7)
  37. #define SD_EN_CLK1 BIT(8) /* 0: clock to pad SD_CLK0; 1: clock to pad SD_CLK1 */
  38. #define SD_EN_SHIFT (9)
  39. #define SD_EN_CSS_MASK (1 << SD_EN_SHIFT)
  40. #define SD_EN_CSS_LOW (0 << SD_EN_SHIFT)
  41. #define SD_EN_CSS_HIGH (1 << SD_EN_SHIFT)
  42. #define SD_EN_RAND_SEL BIT(30) /* 0: USE EFFUSE bits; 1: use register SDx_SEED value */
  43. #define SD_EN_RAND_EN BIT(31) /* 0: randomizer disable; 1: randomizer enable */
  44. /* SD_CTL register */
  45. #define SD_CTL_TM_SHIFT (0)
  46. #define SD_CTL_TM(x) ((x) << SD_CTL_TM_SHIFT)
  47. #define SD_CTL_TM_MASK SD_CTL_TM(0xf)
  48. #define SD_CTL_TM_CMD_NO_RESP SD_CTL_TM(0x0)
  49. #define SD_CTL_TM_CMD_6B_RESP SD_CTL_TM(0x1)
  50. #define SD_CTL_TM_CMD_17B_RESP SD_CTL_TM(0x2)
  51. #define SD_CTL_TM_CMD_6B_RESP_BUSY SD_CTL_TM(0x3)
  52. #define SD_CTL_TM_CMD_RESP_DATA_IN SD_CTL_TM(0x4)
  53. #define SD_CTL_TM_CMD_RESP_DATA_OUT SD_CTL_TM(0x5)
  54. #define SD_CTL_TM_DATA_IN SD_CTL_TM(0x6)
  55. #define SD_CTL_TM_DATA_OUT SD_CTL_TM(0x7)
  56. #define SD_CTL_TM_CLK_OUT_ONLY SD_CTL_TM(0x8)
  57. #define SD_CTL_C7EN BIT(5) /* 0: enable CRC7 check; 1: disable CRC7 check */
  58. #define SD_CTL_LBE BIT(6) /* enable last block send 8 more clocks */
  59. #define SD_CTL_START BIT(7)
  60. #define SD_CTL_TCN_SHIFT (8)
  61. #define SD_CTL_TCN(x) ((x) << SD_CTL_TCN_SHIFT)
  62. #define SD_CTL_TCN_MASK SD_CTL_TCN(0xf)
  63. #define SD_CTL_SCC BIT(12)
  64. #define SD_CTL_CMDLEN BIT(13) /* 0: don't drives CMD line to low; 1: drive CMD line to low */
  65. #define SD_CTL_WDELAY_SHIFT (16)
  66. #define SD_CTL_WDELAY(x) ((x) << SD_CTL_WDELAY_SHIFT)
  67. #define SD_CTL_WDELAY_MASK SD_CTL_WDELAY(0xf)
  68. #define SD_CTL_RDELAY_SHIFT (20)
  69. #define SD_CTL_RDELAY(x) ((x) << SD_CTL_RDELAY_SHIFT)
  70. #define SD_CTL_RDELAY_MASK SD_CTL_RDELAY(0xf)
  71. /* SD_STATE register */
  72. #define SD_STATE_C7ER BIT(0)
  73. #define SD_STATE_RC16ER BIT(1)
  74. #define SD_STATE_WC16ER BIT(2)
  75. #define SD_STATE_CLC BIT(3)
  76. #define SD_STATE_CLNR BIT(4)
  77. #define SD_STATE_TRANS_IRQ_PD BIT(5)
  78. #define SD_STATE_TRANS_IRQ_EN BIT(6)
  79. #define SD_STATE_DAT0S BIT(7)
  80. #define SD_STATE_SDIO_IRQ_EN BIT(8)
  81. #define SD_STATE_SDIO_IRQ_PD BIT(9)
  82. #define SD_STATE_DAT1S BIT(10)
  83. #define SD_STATE_CMDS BIT(11)
  84. #define SD_STATE_MEMRDY BIT(12)
  85. #define SD_STATE_FIFO_FULL BIT(12)
  86. #define SD_STATE_FIFO_EMPTY BIT(13)
  87. #define SD_STATE_FIFO_RESET BIT(14)
  88. #define SD_STATE_TIMEOUT_ERROR BIT(15)
  89. #define SD_STATE_ERR_MASK (SD_STATE_CLNR | SD_STATE_WC16ER | \
  90. SD_STATE_RC16ER | SD_STATE_C7ER)
  91. /* SD_CMD register */
  92. #define SD_CMD_MASK (0xFF)
  93. /* SD_BLK_SIZE register */
  94. #define SD_BLK_SIZE_MASK (0x3FF)
  95. /* SD_BLK_NUM register */
  96. #define SD_BLK_NUM_MASK (0xFFFF)
  97. #define MMC_IS_SD0_DEV(x) ((uint32_t)(x) == SD0_REG_BASE)
  98. /* mmc hardware controller */
/*
 * Memory-mapped register layout of the Actions SD/MMC host controller.
 * Field order mirrors the hardware register map — do not reorder or resize.
 */
struct acts_mmc_controller {
	volatile uint32_t en;        /* SD_EN: enable, bus width, FIFO width, bus select */
	volatile uint32_t ctl;       /* SD_CTL: transfer mode, START bit, RD/WR delay chains */
	volatile uint32_t state;     /* SD_STATE: error flags, FIFO status, irq enable/pending */
	volatile uint32_t cmd;       /* SD_CMD: command index */
	volatile uint32_t arg;       /* SD_ARG: 32-bit command argument */
	volatile uint32_t rspbuf[5]; /* SD_RSPBUF0..4: command response words */
	volatile uint32_t dat;       /* SD_DAT: data FIFO access window */
	volatile uint32_t blk_size;  /* SD_BLK_SIZE: bytes per block */
	volatile uint32_t blk_num;   /* SD_BLK_NUM: number of blocks to transfer */
};
/* Build-time (ROM) configuration for one MMC controller instance. */
struct acts_mmc_config {
	struct acts_mmc_controller *base;     /* MMIO register base address */
	void (*irq_config_func)(void);        /* hooks up the controller IRQ line */
	const char *dma_dev_name;             /* DMA controller device name */
	uint8_t clock_id;                     /* peripheral clock gate id */
	uint8_t reset_id;                     /* peripheral reset line id */
	uint8_t clk_sel;                      /*0 or 1 select clk0 or clk1*/
	uint8_t dma_id;                       /* DMA request slot for this controller */
	uint8_t dma_chan;                     /* DMA channel to request at init */
	uint8_t flag_use_dma:1;               /* 1: transfer via DMA, 0: CPU/PIO */
	uint8_t bus_width:4;                  /* configured data bus width */
	uint8_t data_reg_width:3;             /* bytes moved per SD_DAT access */
	uint8_t use_irq_gpio:1;               /* 1: out-of-band SDIO irq via GPIO */
	uint8_t sdio_irq_gpio;                /* GPIO pin number for SDIO irq */
	gpio_dt_flags_t sdio_sdio_gpio_flags; /* DT flags for the SDIO irq pin */
	char *sdio_irq_gpio_name;             /* GPIO controller device name */
};
/* Run-time state for one MMC controller instance. */
struct acts_mmc_data {
	struct k_sem trans_done;         /* given by the ISR on transfer done/error */
	const struct device *dma_dev;    /* bound DMA controller */
	uint32_t capability;             /* MMC_CAP_* bits reported to the core */
	uint8_t rdelay;                  /* read delay-chain tap (set by set_clock) */
	uint8_t wdelay;                  /* write delay-chain tap (set by set_clock) */
	uint8_t dma_chan;                /* channel actually granted by dma_request() */
	uint8_t reserved[1];
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
	struct k_sem dma_sync;           /* given by the DMA completion callback */
#endif
	void (*sdio_irq_cbk)(void *arg); /* client SDIO interrupt callback */
	void *sdio_irq_cbk_arg;          /* opaque argument for sdio_irq_cbk */
	const struct device *sdio_irq_gpio_dev; /* GPIO dev for out-of-band irq */
	struct gpio_callback sdio_irq_gpio_cb;  /* GPIO callback descriptor */
	uint8_t device_release_flag : 1; /* device has been released flag such as sd card pluged out */
};
  144. #if IS_ENABLED(CONFIG_MMC_0)||IS_ENABLED(CONFIG_MMC_1)
  145. #if (CONFIG_MMC_ACTS_ERROR_DETAIL == 1)
  146. static void mmc_acts_dump_regs(struct acts_mmc_controller *mmc)
  147. {
  148. LOG_INF( "** mmc contoller register ** \n");
  149. LOG_INF(" BASE: %08x\n", (uint32_t)mmc);
  150. LOG_INF(" SD_EN: %08x\n", mmc->en);
  151. LOG_INF(" SD_CTL: %08x\n", mmc->ctl);
  152. LOG_INF(" SD_STATE: %08x\n", mmc->state);
  153. LOG_INF(" SD_CMD: %08x\n", mmc->cmd);
  154. LOG_INF(" SD_ARG: %08x\n", mmc->arg);
  155. LOG_INF(" SD_RSPBUF0: %08x\n", mmc->rspbuf[0]);
  156. LOG_INF(" SD_RSPBUF1: %08x\n", mmc->rspbuf[1]);
  157. LOG_INF(" SD_RSPBUF2: %08x\n", mmc->rspbuf[2]);
  158. LOG_INF(" SD_RSPBUF3: %08x\n", mmc->rspbuf[3]);
  159. LOG_INF(" SD_RSPBUF4: %08x\n", mmc->rspbuf[4]);
  160. LOG_INF("SD_BLK_SIZE: %08x\n", mmc->blk_size);
  161. LOG_INF(" SD_BLK_NUM: %08x\n", mmc->blk_num);
  162. #ifdef CMU_SD0CLK
  163. LOG_INF(" CMU_SD0CLK: %08x\n", sys_read32(CMU_SD0CLK));
  164. #endif
  165. #ifdef CMU_SD1CLK
  166. LOG_INF(" CMU_SD1CLK: %08x\n", sys_read32(CMU_SD1CLK));
  167. #endif
  168. }
  169. #endif
  170. static int mmc_acts_check_err(const struct acts_mmc_config *cfg,
  171. struct acts_mmc_controller *mmc,
  172. uint32_t resp_err_mask)
  173. {
  174. uint32_t state = mmc->state & resp_err_mask;
  175. if (!(state & SD_STATE_ERR_MASK))
  176. return 0;
  177. if (state & SD_STATE_CLNR)
  178. LOG_ERR("Command No response");
  179. if (state & SD_STATE_C7ER)
  180. LOG_ERR("CRC command response Error");
  181. if (state & SD_STATE_RC16ER)
  182. LOG_ERR("CRC Read data Error");
  183. if (state & SD_STATE_WC16ER)
  184. LOG_ERR("CRC Write data Error");
  185. return 1;
  186. }
/*
 * Recover the controller after a command/data error.
 * SD_EN and SD_STATE are saved, the peripheral reset line is pulsed, and
 * both registers are restored so that bus width / clock selection and irq
 * enables survive the reset. The save/reset/restore order is deliberate.
 */
static void mmc_acts_err_reset(const struct acts_mmc_config *cfg,
			       struct acts_mmc_controller *mmc)
{
	uint32_t en_bak, state_bak;

	en_bak = mmc->en;
	state_bak = mmc->state;
	/* reset mmc controller */
	acts_reset_peripheral(cfg->reset_id);
	mmc->en = en_bak;
	mmc->state = state_bak;
}
  198. static void mmc_acts_trans_irq_setup(struct acts_mmc_controller *mmc,
  199. bool enable)
  200. {
  201. uint32_t key, state;
  202. key = irq_lock();
  203. state = mmc->state;
  204. /* don't clear sdio pending */
  205. state &= ~SD_STATE_SDIO_IRQ_PD;
  206. if (enable)
  207. state |= SD_STATE_TRANS_IRQ_EN;
  208. else
  209. state &= ~SD_STATE_TRANS_IRQ_EN;
  210. mmc->state = state;
  211. irq_unlock(key);
  212. }
  213. static void mmc_acts_sdio_irq_setup(struct acts_mmc_controller *mmc,
  214. bool enable)
  215. {
  216. uint32_t key, state;
  217. key = irq_lock();
  218. state = mmc->state;
  219. /* don't clear transfer irq pending */
  220. state &= ~SD_STATE_TRANS_IRQ_PD;
  221. if (enable)
  222. state |= SD_STATE_SDIO_IRQ_EN;
  223. else
  224. state &= ~SD_STATE_SDIO_IRQ_EN;
  225. mmc->state = state;
  226. irq_unlock(key);
  227. }
/*
 * Controller interrupt handler.
 * Wakes the thread blocked in the DMA transfer path on completion or error,
 * and forwards in-band SDIO interrupts to the registered client callback.
 */
static void mmc_acts_isr(void *arg)
{
	struct device *dev = (struct device *)arg;
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	uint32_t state;

	state = mmc->state;
	LOG_DBG("enter isr: state 0x%x", state);
	/* transfer finished or failed: release the waiter on trans_done */
	if ((state & SD_STATE_TRANS_IRQ_EN) &&
	    ((state & SD_STATE_TRANS_IRQ_PD) ||
	     (state & SD_STATE_ERR_MASK))) {
		k_sem_give(&data->trans_done);
	}
	/* in-band SDIO interrupt: invoke the client callback in isr context */
	if ((state & SD_STATE_SDIO_IRQ_EN) &&
	    (state & SD_STATE_SDIO_IRQ_PD)) {
		if (data->sdio_irq_cbk) {
			data->sdio_irq_cbk(data->sdio_irq_cbk_arg);
		}
	}
	/* clear irq pending, keep the error bits */
	/* NOTE(review): the PD bits appear to be write-1-to-clear — writing the
	 * snapshot back acks them while leaving the error flags for the command
	 * path to inspect; confirm against the SoC datasheet.
	 */
	mmc->state = state & (SD_STATE_TRANS_IRQ_EN | SD_STATE_SDIO_IRQ_EN |
			      SD_STATE_TRANS_IRQ_PD | SD_STATE_SDIO_IRQ_PD);
}
  252. static int mmc_acts_get_trans_mode(struct mmc_cmd *cmd, uint32_t *trans_mode,
  253. uint32_t *rsp_err_mask)
  254. {
  255. uint32_t mode =0, err_mask = 0;
  256. switch (mmc_resp_type(cmd)) {
  257. case MMC_RSP_NONE:
  258. mode = SD_CTL_TM_CMD_NO_RESP;
  259. break;
  260. case MMC_RSP_R1:
  261. if (cmd->buf) {
  262. if (cmd->flags & MMC_DATA_READ)
  263. mode = SD_CTL_TM_CMD_RESP_DATA_IN;
  264. else if (cmd->flags & MMC_DATA_WRITE)
  265. mode = SD_CTL_TM_CMD_RESP_DATA_OUT;
  266. else if (cmd->flags & MMC_DATA_WRITE_DIRECT)
  267. mode = SD_CTL_TM_DATA_OUT;
  268. else if (cmd->flags & MMC_DATA_READ_DIRECT)
  269. mode = SD_CTL_TM_DATA_IN;
  270. } else {
  271. mode = SD_CTL_TM_CMD_6B_RESP;
  272. }
  273. err_mask = SD_STATE_CLNR | SD_STATE_C7ER | SD_STATE_RC16ER |
  274. SD_STATE_WC16ER;
  275. break;
  276. case MMC_RSP_R1B:
  277. mode = SD_CTL_TM_CMD_6B_RESP_BUSY;
  278. err_mask = SD_STATE_CLNR | SD_STATE_C7ER;
  279. break;
  280. case MMC_RSP_R2:
  281. mode = SD_CTL_TM_CMD_17B_RESP;
  282. err_mask = SD_STATE_CLNR | SD_STATE_C7ER;
  283. break;
  284. case MMC_RSP_R3:
  285. mode = SD_CTL_TM_CMD_6B_RESP;
  286. err_mask = SD_STATE_CLNR;
  287. break;
  288. default:
  289. LOG_ERR("unsupported RSP 0x%x\n", mmc_resp_type(cmd));
  290. return -ENOTSUP;
  291. }
  292. if (trans_mode)
  293. *trans_mode = mode;
  294. if (rsp_err_mask)
  295. *rsp_err_mask = err_mask;
  296. return 0;
  297. }
  298. static int mmc_acts_wait_cmd(struct acts_mmc_controller *mmc, int timeout_ms)
  299. {
  300. uint32_t start_time, curr_time;
  301. start_time = k_cycle_get_32();
  302. while (mmc->ctl & SD_CTL_START) {
  303. curr_time = k_cycle_get_32();
  304. if (k_cyc_to_us_floor32(curr_time - start_time)
  305. >= (timeout_ms * 1000)) {
  306. LOG_ERR("mmc cmd timeout");
  307. return -ETIMEDOUT;
  308. }
  309. }
  310. #if (CONFIG_MMC_WAIT_DAT1_BUSY == 1)
  311. start_time = k_cycle_get_32();
  312. while ((mmc->state & SD_STATE_DAT1S) == 0) {
  313. curr_time = k_cycle_get_32();
  314. if (k_cyc_to_us_floor32(curr_time - start_time)
  315. >= (timeout_ms * 1000)) {
  316. LOG_ERR("mmc dat1 timeout");
  317. return -ETIMEDOUT;
  318. }
  319. }
  320. #endif
  321. return 0;
  322. }
  323. static bool mmc_acts_data_is_ready(struct acts_mmc_controller *mmc,
  324. bool is_write, bool use_fifo)
  325. {
  326. uint32_t state = mmc->state;
  327. if (use_fifo)
  328. if (is_write)
  329. return (!(state & SD_STATE_FIFO_FULL));
  330. else
  331. return (!(state & SD_STATE_FIFO_EMPTY));
  332. else
  333. return (state & SD_STATE_MEMRDY);
  334. }
/*
 * PIO data transfer: the CPU feeds/drains the controller FIFO through the
 * SD_DAT window, data_reg_width bytes at a time.
 * Returns 0 on success, -EIO on a controller error, -ETIMEDOUT when
 * timeout_ms elapses before all bytes are moved.
 */
static int mmc_acts_transfer_by_cpu(const struct device *dev,
				    bool is_write, uint8_t *buf,
				    int len, uint32_t timeout_ms)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_controller *mmc = cfg->base;
	uint32_t start_time, curr_time;
	uint32_t data, data_len;

	/* kick the controller state machine */
	mmc->ctl |= SD_CTL_START;
	start_time = k_cycle_get_32();
	while (len > 0) {
		/* abort early if the card stopped responding */
		if (mmc_acts_check_err(cfg, mmc, SD_STATE_CLNR)) {
			return -EIO;
		}
		if (mmc_acts_data_is_ready(mmc, is_write, CONFIG_MMC_STATE_FIFO)) {
			/* move at most one SD_DAT-register's worth per pass */
			data_len = len < cfg->data_reg_width ? len : cfg->data_reg_width;
			if (is_write) {
				/* memcpy path handles unaligned buffers and short tails;
				 * aligned full-width chunks go as direct 32-bit accesses
				 */
				if (((uint32_t)buf & 0x3) || data_len < cfg->data_reg_width)
					memcpy(&data, buf, data_len);
				else
					data = *((uint32_t *)buf);
				mmc->dat = data;
			} else {
				data = mmc->dat;
				if (((uint32_t)buf & 0x3) || data_len < cfg->data_reg_width)
					memcpy(buf, &data, data_len);
				else
					*((uint32_t *)buf) = data;
			}
			buf += data_len;
			len -= data_len;
		}
		curr_time = k_cycle_get_32();
		if (k_cyc_to_us_floor32(curr_time - start_time)
		    >= (timeout_ms * 1000)) {
			LOG_ERR("mmc io timeout, is_write %d", is_write);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
  376. #define DMA_IRQ_TC (0) /* DMA completion flag */
  377. #define DMA_IRQ_HF (1) /* DMA half-full flag */
  378. static void dma_done_callback(const struct device *dev, void *callback_data, uint32_t ch , int type)
  379. {
  380. struct acts_mmc_data *data = (struct acts_mmc_data *)callback_data;
  381. ARG_UNUSED(dev);
  382. ARG_UNUSED(ch);
  383. if (type != DMA_IRQ_TC)
  384. return;
  385. LOG_DBG("mmc dma transfer is done:0x%x\n", (u32_t)data);
  386. k_sem_give(&data->dma_sync);
  387. }
  388. static int mmc_dma_wait_timeout(const struct device *dma_dev, uint32_t dma_chan, uint32_t timeout_us)
  389. {
  390. uint32_t start_time, curr_time;
  391. struct dma_status stat = {0};
  392. int ret;
  393. start_time = k_cycle_get_32();
  394. while (1) {
  395. ret = dma_get_status(dma_dev, dma_chan, &stat);
  396. if (ret) {
  397. LOG_ERR("get dma(%d) status error %d\n", dma_chan, ret);
  398. return -EFAULT;
  399. }
  400. /* DMA transfer finish */
  401. if (!stat.pending_length)
  402. break;
  403. curr_time = k_cycle_get_32();
  404. if (k_cyc_to_us_floor32(curr_time - start_time) >= timeout_us) {
  405. LOG_ERR("wait mmc dma(%d) finish timeout", dma_chan);
  406. return -ETIMEDOUT;
  407. }
  408. }
  409. return 0;
  410. }
/*
 * DMA data transfer between a memory buffer and the controller FIFO.
 * Configures and starts a DMA channel, switches the controller onto the
 * DMA bus, starts the state machine, then blocks on trans_done until the
 * ISR signals completion (or times out).
 * Returns 0 on success, -ENXIO if the device was released mid-transfer,
 * or a negative error from the semaphore/DMA/idle waits.
 */
static int mmc_acts_transfer_by_dma(const struct device *dev,
				    bool is_write, uint8_t *buf,
				    int len, uint32_t timeout_ms)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_block_cfg = {0};
	int err;

	data->device_release_flag = 0;
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
	/* completion is additionally signalled through dma_done_callback() */
	dma_cfg.dma_callback = dma_done_callback;
	dma_cfg.user_data = data;
	dma_cfg.complete_callback_en = 1;
#endif
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_block_cfg;
	dma_block_cfg.block_size = len;
	/* SD0 FIFO width can select 8bits and 32bits */
	if (MMC_IS_SD0_DEV(mmc)) {
		if (mmc->en & SD_EN_FIFO_WIDTH)
			dma_cfg.dest_data_size = 1; /* fifo width is 8bit */
		else
			dma_cfg.dest_data_size = 4; /* fifo width is 32bit */
	} else {
		/* SD1 FIFO width is fixed 8bits */
		dma_cfg.dest_data_size = 1;
	}
	/* NOTE(review): only dest_data_size is set, for both directions —
	 * presumably the DMA driver applies it to the peripheral side; confirm
	 * against the Actions DMA driver.
	 */
	if (is_write) {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_block_cfg.source_address = (uint32_t)buf;
		dma_block_cfg.dest_address = (uint32_t)&mmc->dat;
	} else {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_block_cfg.source_address = (uint32_t)&mmc->dat;
		dma_block_cfg.dest_address = (uint32_t)buf;
	}
	if (dma_config(data->dma_dev, data->dma_chan, &dma_cfg)) {
		LOG_ERR("dma%d config error\n", data->dma_chan);
		return -1;
	}
	if (dma_start(data->dma_dev, data->dma_chan)) {
		LOG_ERR("dma%d start error\n", data->dma_chan);
		return -1;
	}
	/* route controller data path to the DMA bus and arm the transfer irq */
	mmc->en |= SD_EN_BUS_SEL_DMA;
	mmc_acts_trans_irq_setup(mmc, true);
	/* start mmc controller state machine */
	mmc->ctl |= SD_CTL_START;
	/* wait until data transfer is done */
	err = k_sem_take(&data->trans_done, K_MSEC(timeout_ms));
	if (data->device_release_flag) {
		/* woken by mmc_acts_release_device(), e.g. card unplugged */
		err = -ENXIO;
	} else {
		if(!err) {
#if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
			err = k_sem_take(&data->dma_sync, K_MSEC(timeout_ms));
#else
			/* the controller irq fired; poll until DMA drains */
			err = mmc_dma_wait_timeout(data->dma_dev, data->dma_chan,
						   MMC_DMA_BUFFER_BUSY_TIMEOUT_US);
#endif
		}
		if (!err) {
			/* wait controller idle */
			err = mmc_acts_wait_cmd(mmc, timeout_ms);
		}
	}
	/* tear down: irq off, back to AHB bus, stop the DMA channel */
	mmc_acts_trans_irq_setup(mmc, false);
	mmc->en &= ~SD_EN_BUS_SEL_DMA;
	dma_stop(data->dma_dev, data->dma_chan);
	return err;
}
/*
 * DMA transfer in pure polling mode — no interrupts, no blocking waits.
 * Used from ISR/panic context (see mmc_acts_send_cmd) where the scheduler
 * and the controller irq are unavailable; completion is detected by
 * polling the DMA channel status.
 * Returns 0 on success, -ENXIO if the device was released, or a negative
 * error from the DMA/idle polls.
 */
static int mmc_acts_transfer_by_query(const struct device *dev,
				      bool is_write, uint8_t *buf,
				      int len, uint32_t timeout_ms)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_block_cfg = {0};
	int err = 0;

	data->device_release_flag = 0;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_block_cfg;
	dma_block_cfg.block_size = len;
	/* SD0 FIFO width can select 8bits and 32bits */
	if (MMC_IS_SD0_DEV(mmc)) {
		if (mmc->en & SD_EN_FIFO_WIDTH)
			dma_cfg.dest_data_size = 1; /* fifo width is 8bit */
		else
			dma_cfg.dest_data_size = 4; /* fifo width is 32bit */
	} else {
		/* SD1 FIFO width is fixed 8bits */
		dma_cfg.dest_data_size = 1;
	}
	if (is_write) {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_block_cfg.source_address = (uint32_t)buf;
		dma_block_cfg.dest_address = (uint32_t)&mmc->dat;
	} else {
		dma_cfg.dma_slot = cfg->dma_id;
		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_block_cfg.source_address = (uint32_t)&mmc->dat;
		dma_block_cfg.dest_address = (uint32_t)buf;
	}
	if (dma_config(data->dma_dev, data->dma_chan, &dma_cfg)) {
		LOG_ERR("dma%d config error\n", data->dma_chan);
		return -1;
	}
	if (dma_start(data->dma_dev, data->dma_chan)) {
		LOG_ERR("dma%d start error\n", data->dma_chan);
		return -1;
	}
	mmc->en |= SD_EN_BUS_SEL_DMA;
	/* start mmc controller state machine */
	mmc->ctl |= SD_CTL_START;
	if (data->device_release_flag) {
		err = -ENXIO;
	} else {
		/* err is still 0 here; the guard mirrors the irq-driven path */
		if(!err) {
			err = mmc_dma_wait_timeout(data->dma_dev, data->dma_chan,
						   MMC_DMA_BUFFER_BUSY_TIMEOUT_US);
		}
		if (!err) {
			/* wait controller idle */
			err = mmc_acts_wait_cmd(mmc, timeout_ms);
		}
	}
	mmc->en &= ~SD_EN_BUS_SEL_DMA;
	dma_stop(data->dma_dev, data->dma_chan);
	return err;
}
/*
 * Send one MMC/SD command, optionally with a data phase.
 * Programs SD_CTL/SD_ARG/SD_CMD/SD_BLK_*, runs the data phase over the
 * appropriate path (polling DMA in panic context, PIO, or irq-driven DMA),
 * waits for the controller to go idle, checks the error bits and finally
 * unpacks the response into cmd->resp[].
 * Returns 0 on success, -EIO on any command/data error (after resetting
 * the controller), or a pass-through error from mode selection.
 */
static int mmc_acts_send_cmd(const struct device *dev, struct mmc_cmd *cmd)
{
	const struct acts_mmc_config *cfg = dev->config;
	struct acts_mmc_data *data = dev->data;
	struct acts_mmc_controller *mmc = cfg->base;
	int is_write = cmd->flags & (MMC_DATA_WRITE | MMC_DATA_WRITE_DIRECT);
	uint32_t ctl, rsp_err_mask, len, trans_mode;
	int err, timeout;
	extern int check_panic_exe(void);

	LOG_DBG("CMD%02d: arg 0x%x flags 0x%x is_write %d \n",
		cmd->opcode, cmd->arg, cmd->flags, !!is_write);
	LOG_DBG(" blk_num 0x%x blk_size 0x%x, buf %p \n",
		cmd->blk_num, cmd->blk_size, cmd->buf);
	trans_mode = 0;
	/* pick the hardware transfer mode and relevant error bits */
	err = mmc_acts_get_trans_mode(cmd, &trans_mode, &rsp_err_mask);
	if (err) {
		return err;
	}
	/* transfer mode plus the delay-chain taps chosen by set_clock() */
	ctl = trans_mode | SD_CTL_RDELAY(data->rdelay) |
	      SD_CTL_WDELAY(data->wdelay);
	if (cmd->buf)
		ctl |= SD_CTL_LBE;
	/* sdio wifi need continues clock */
	if (data->capability & MMC_CAP_SDIO_IRQ)
		ctl |= SD_CTL_SCC;
#if (CONFIG_MMC_SD0_FIFO_WIDTH_8BITS != 1)
	/* SD0: fall back to the 8-bit FIFO for unaligned buffers so the
	 * 32-bit FIFO path never sees a misaligned address
	 */
	if (MMC_IS_SD0_DEV(mmc)){
		if(((u32_t)cmd->buf & 0x3))
			mmc->en |= SD_EN_FIFO_WIDTH;
		else
			mmc->en &= ~SD_EN_FIFO_WIDTH;
	}
#endif
	mmc->ctl = ctl;
	mmc->arg = cmd->arg;
	mmc->cmd = cmd->opcode;
	mmc->blk_num = cmd->blk_num;
	mmc->blk_size = cmd->blk_size;
	if (!cmd->buf) {
		/* only command need to transfer */
		mmc->ctl |= SD_CTL_START;
	} else {
		/* command with data transfer */
		len = cmd->blk_num * cmd->blk_size;
		/* scale the data timeout with the number of blocks */
		timeout = cmd->blk_num * MMC_DAT_TIMEOUT_MS;
		/* When SD0 FIFO and DMA width is 32bits, data address in memory shall align by 32bits */
		if (k_is_in_isr() && check_panic_exe()) {
			/* panic path: no scheduler, poll the DMA channel */
			err = mmc_acts_transfer_by_query(dev, is_write,
							 cmd->buf, len, timeout);
		} else if (!cfg->flag_use_dma) {
			err = mmc_acts_transfer_by_cpu(dev, is_write,
						       cmd->buf, len, timeout);
		} else {
			err = mmc_acts_transfer_by_dma(dev, is_write,
						       cmd->buf, len, timeout);
		}
	}
	/* ORing negative errnos loses the specific code but any nonzero
	 * value means failure, which is all that is checked below
	 */
	err |= mmc_acts_wait_cmd(mmc, MMC_CMD_TIMEOUT_MS);
	err |= mmc_acts_check_err(cfg, mmc, rsp_err_mask);
	if (err) {
		/*
		 * FIXME: the operation of detecting card by polling maybe
		 * output no reponse error message periodically. So filter
		 * it out by config.
		 */
#if (CONFIG_MMC_ACTS_ERROR_DETAIL == 1)
		LOG_ERR("send cmd%d error, state 0x%x \n",
			cmd->opcode, mmc->state);
		mmc_acts_dump_regs(mmc);
#endif
		mmc_acts_err_reset(cfg, mmc);
		return -EIO;
	}
	/* process responses */
	if (!cmd->buf && (cmd->flags & MMC_RSP_PRESENT)) {
		if (cmd->flags & MMC_RSP_136) {
			/* MSB first */
			cmd->resp[3] = mmc->rspbuf[0];
			cmd->resp[2] = mmc->rspbuf[1];
			cmd->resp[1] = mmc->rspbuf[2];
			cmd->resp[0] = mmc->rspbuf[3];
		} else {
			/* 48-bit response: hardware appears to store it shifted
			 * by 8 bits across rspbuf[0..1] — reassemble the 32-bit
			 * payload and the trailing byte (TODO confirm layout
			 * against the SoC datasheet)
			 */
			cmd->resp[0] = (mmc->rspbuf[1] << 24) |
				       (mmc->rspbuf[0] >> 8);
			cmd->resp[1] = (mmc->rspbuf[1] << 24) >> 8;
		}
	}
	return 0;
}
  637. static int mmc_acts_set_sdio_irq_cbk(const struct device *dev,
  638. sdio_irq_callback_t callback,
  639. void *arg)
  640. {
  641. struct acts_mmc_data *data = dev->data;
  642. if (!(data->capability & MMC_CAP_SDIO_IRQ))
  643. return -ENOTSUP;
  644. data->sdio_irq_cbk = callback;
  645. data->sdio_irq_cbk_arg = arg;
  646. return 0;
  647. }
  648. static void sdio_irq_gpio_callback(const struct device *port,
  649. struct gpio_callback *cb,
  650. uint32_t pins)
  651. {
  652. struct acts_mmc_data *data =
  653. CONTAINER_OF(cb, struct acts_mmc_data, sdio_irq_gpio_cb);
  654. ARG_UNUSED(pins);
  655. if (data->sdio_irq_cbk) {
  656. data->sdio_irq_cbk(data->sdio_irq_cbk_arg);
  657. }
  658. }
  659. static void mmc_acts_sdio_irq_gpio_setup(const struct device *dev, bool enable)
  660. {
  661. const struct acts_mmc_config *cfg = dev->config;
  662. struct acts_mmc_data *data = dev->data;
  663. if (enable)
  664. gpio_pin_interrupt_configure(data->sdio_irq_gpio_dev,
  665. cfg->sdio_irq_gpio, GPIO_INT_EDGE_TO_ACTIVE);
  666. else
  667. gpio_pin_interrupt_configure(data->sdio_irq_gpio_dev,
  668. cfg->sdio_irq_gpio, GPIO_INT_EDGE_TO_INACTIVE);
  669. }
  670. static int mmc_acts_enable_sdio_irq(const struct device *dev, bool enable)
  671. {
  672. const struct acts_mmc_config *cfg = dev->config;
  673. struct acts_mmc_data *data = dev->data;
  674. struct acts_mmc_controller *mmc = cfg->base;
  675. if (!(data->capability & MMC_CAP_SDIO_IRQ))
  676. return -ENOTSUP;
  677. if (data->capability & MMC_CAP_4_BIT_DATA) {
  678. mmc_acts_sdio_irq_setup(mmc, enable);
  679. mmc->en |= SD_EN_SDIO;
  680. } else {
  681. if(cfg->use_irq_gpio){
  682. mmc_acts_sdio_irq_gpio_setup(dev, enable);
  683. } else {
  684. LOG_ERR("enable_sdio_irq fail\n");
  685. }
  686. }
  687. return 0;
  688. }
  689. static int mmc_acts_set_bus_width(const struct device *dev, unsigned int bus_width)
  690. {
  691. const struct acts_mmc_config *cfg = dev->config;
  692. struct acts_mmc_controller *mmc = cfg->base;
  693. LOG_DBG("bus_width=%d\n", bus_width);
  694. if (bus_width == MMC_BUS_WIDTH_1) {
  695. mmc->en &= ~SD_EN_DW_MASK;
  696. mmc->en |= SD_EN_DW_1BIT;
  697. } else if (bus_width == MMC_BUS_WIDTH_4) {
  698. mmc->en &= ~SD_EN_DW_MASK;
  699. mmc->en |= SD_EN_DW_4BIT;
  700. } else if (bus_width == MMC_BUS_WIDTH_8) {
  701. mmc->en &= ~SD_EN_DW_MASK;
  702. mmc->en |= SD_EN_DW_8BIT;
  703. } else {
  704. return -EINVAL;
  705. }
  706. return 0;
  707. }
  708. #ifdef CONFIG_SOC_SERIES_LEOPARD_FPGA
  709. #define LEOPARD_FPGA_MMC_CLK_MAX_HZ (1*1000*1000) //sram-6*1000*1000
  710. #endif
  711. static int mmc_acts_set_clock(const struct device *dev, unsigned int rate_hz)
  712. {
  713. const struct acts_mmc_config *cfg = dev->config;
  714. struct acts_mmc_data *data = dev->data;
  715. uint32_t rdelay, wdelay;
  716. #ifdef CONFIG_SOC_SERIES_LEOPARD_FPGA
  717. if (rate_hz > LEOPARD_FPGA_MMC_CLK_MAX_HZ)
  718. rate_hz = LEOPARD_FPGA_MMC_CLK_MAX_HZ;
  719. #endif
  720. /*
  721. * Set the RDELAY and WDELAY based on the sd clk.
  722. */
  723. if (rate_hz < 200000) {
  724. rdelay = 0xa;
  725. wdelay = 0xa;
  726. } /*else if (rate_hz <= 15000000) {
  727. rdelay = 0xa;
  728. wdelay = 0xf;
  729. } else if (rate_hz <= 30000000) {
  730. rdelay = 0x8;
  731. wdelay = 0x9;
  732. }*/ else {
  733. rdelay = 0x8;
  734. wdelay = 0x8;
  735. }
  736. clk_set_rate(cfg->clock_id, rate_hz);
  737. /* config delay chain */
  738. data->rdelay = rdelay;
  739. data->wdelay = wdelay;
  740. return 0;
  741. }
  742. static uint32_t mmc_acts_get_capability(const struct device *dev)
  743. {
  744. struct acts_mmc_data *data = dev->data;
  745. return data->capability;
  746. }
  747. static int mmc_acts_release_device(const struct device *dev)
  748. {
  749. struct acts_mmc_data *data = dev->data;
  750. u32_t key = irq_lock();
  751. data->device_release_flag = 1;
  752. k_sem_give(&data->trans_done);
  753. k_sem_reset(&data->trans_done);
  754. irq_unlock(key);
  755. return 0;
  756. }
/* Driver API vector exported to the MMC core (see <drivers/mmc/mmc.h>). */
static const struct mmc_driver_api mmc_acts_driver_api = {
	.get_capability = mmc_acts_get_capability,
	.set_clock = mmc_acts_set_clock,
	.set_bus_width = mmc_acts_set_bus_width,
	.send_cmd = mmc_acts_send_cmd,
	.set_sdio_irq_callback = mmc_acts_set_sdio_irq_cbk,
	.enable_sdio_irq = mmc_acts_enable_sdio_irq,
	.release_device = mmc_acts_release_device,
};
  766. static int mmc_acts_init(const struct device *dev)
  767. {
  768. const struct acts_mmc_config *cfg = dev->config;
  769. struct acts_mmc_data *data = dev->data;
  770. struct acts_mmc_controller *mmc = cfg->base;
  771. int chan;
  772. LOG_INF("mmc_acts_init\n");
  773. if(cfg->flag_use_dma) {
  774. data->dma_dev = device_get_binding(cfg->dma_dev_name);
  775. if (!data->dma_dev) {
  776. LOG_ERR("cannot found dma device\n");
  777. return -ENODEV;
  778. }
  779. chan = dma_request(data->dma_dev, cfg->dma_chan);
  780. if(chan < 0){
  781. LOG_ERR("request dma chan config err chan=%d\n", cfg->dma_chan);
  782. return -ENODEV;
  783. }
  784. data->dma_chan = chan;
  785. LOG_INF("use dma=%d\n", chan);
  786. }
  787. if (cfg->use_irq_gpio) {
  788. data->sdio_irq_gpio_dev = device_get_binding(cfg->sdio_irq_gpio_name);
  789. if (!data->sdio_irq_gpio_dev) {
  790. LOG_ERR("cannot found sdio irq gpio dev device %s",
  791. cfg->sdio_irq_gpio_name);
  792. return -ENODEV;
  793. }
  794. /* Configure IRQ pin and the IRQ call-back/handler */
  795. gpio_pin_configure(data->sdio_irq_gpio_dev,
  796. cfg->sdio_irq_gpio,
  797. GPIO_INPUT | GPIO_INT_EDGE_TO_INACTIVE | cfg->sdio_sdio_gpio_flags);
  798. gpio_init_callback(&data->sdio_irq_gpio_cb,
  799. sdio_irq_gpio_callback,
  800. BIT(cfg->sdio_irq_gpio));
  801. if (gpio_add_callback(data->sdio_irq_gpio_dev,
  802. &data->sdio_irq_gpio_cb)) {
  803. LOG_ERR("add sdio irq fun fail dev device %s",
  804. cfg->sdio_irq_gpio_name);
  805. return -EINVAL;
  806. }
  807. }
  808. /* enable mmc controller clock */
  809. acts_clock_peripheral_enable(cfg->clock_id);
  810. /* reset mmc controller */
  811. acts_reset_peripheral(cfg->reset_id);
  812. /* set initial clock */
  813. mmc_acts_set_clock((struct device *)dev, 100000);
  814. /* enable mmc controller */
  815. if(cfg->clk_sel)
  816. mmc->en |= SD_EN_ENABLE | SD_EN_CLK1;
  817. else
  818. mmc->en |= SD_EN_ENABLE;
  819. #if (CONFIG_MMC_SD0_FIFO_WIDTH_8BITS == 1)
  820. if (MMC_IS_SD0_DEV(mmc))
  821. mmc->en |= SD_EN_FIFO_WIDTH;
  822. #endif
  823. k_sem_init(&data->trans_done, 0, 1);
  824. #if (CONFIG_MMC_YIELD_WAIT_DMA_DONE == 1)
  825. k_sem_init(&data->dma_sync, 0, 1);
  826. #endif
  827. if (MMC_IS_SD0_DEV(mmc)) {
  828. #if (CONFIG_MMC_0_BUS_WIDTH == 8)
  829. data->capability = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED);
  830. #elif (CONFIG_MMC_0_BUS_WIDTH == 4)
  831. data->capability = (MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED);
  832. #else
  833. data->capability = MMC_CAP_SD_HIGHSPEED;
  834. #endif
  835. #if (CONFIG_MMC_0_ENABLE_SDIO_IRQ == 1)
  836. data->capability |= MMC_CAP_SDIO_IRQ;
  837. #endif
  838. } else {
  839. #if (CONFIG_MMC_1_BUS_WIDTH == 4)
  840. data->capability = (MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED);
  841. #else
  842. data->capability = MMC_CAP_SD_HIGHSPEED;
  843. #endif
  844. #if (CONFIG_MMC_1_ENABLE_SDIO_IRQ == 1)
  845. data->capability |= MMC_CAP_SDIO_IRQ;
  846. #endif
  847. }
  848. cfg->irq_config_func();
  849. return 0;
  850. }
  851. #ifdef CONFIG_PM_DEVICE
  852. int mmc_acts_resume_controller(const struct device *dev)
  853. {
  854. const struct acts_mmc_config *cfg = dev->config;
  855. struct acts_mmc_controller *mmc = cfg->base;
  856. /* reset mmc controller */
  857. acts_reset_peripheral(cfg->reset_id);
  858. /* enable mmc controller */
  859. if(cfg->clk_sel)
  860. mmc->en |= SD_EN_ENABLE | SD_EN_CLK1;
  861. else
  862. mmc->en |= SD_EN_ENABLE;
  863. #if (CONFIG_MMC_SD0_FIFO_WIDTH_8BITS == 1)
  864. if (MMC_IS_SD0_DEV(mmc))
  865. mmc->en |= SD_EN_FIFO_WIDTH;
  866. #endif
  867. return 0;
  868. }
  869. int mmc_acts_pm_control(const struct device *dev, enum pm_device_action action)
  870. {
  871. int ret = 0;
  872. switch (action) {
  873. case PM_DEVICE_ACTION_SUSPEND:
  874. break;
  875. case PM_DEVICE_ACTION_RESUME:
  876. /* mmc regs need resume in condition of cpu pwrgating */
  877. mmc_acts_resume_controller(dev);
  878. break;
  879. default:
  880. break;
  881. }
  882. return ret;
  883. }
  884. #else
  885. #define mmc_acts_pm_control NULL
  886. #endif
/*
 * Initializer fragments for struct acts_mmc_config, selected at the
 * DEFINE_CONFIG site via COND_CODE_1. Each fragment is wrapped in
 * parentheses, which COND_CODE_1 strips when pasting the chosen branch.
 */
/* DMA enabled: DMA device name plus per-instance id/channel from Kconfig. */
#define dma_use(n) (\
.dma_dev_name = CONFIG_DMA_0_NAME, \
.dma_id = CONFIG_MMC_##n##_DMA_ID,\
.dma_chan = CONFIG_MMC_##n##_DMA_CHAN,\
.flag_use_dma = 1, \
)
/* DMA disabled. */
#define dma_not(n) (\
.flag_use_dma = 0, \
)
/* SDIO card interrupt delivered through an external GPIO line. */
#define gpio_irq_use(n) (\
.sdio_irq_gpio_name = CONFIG_MMC_##n##_GPIO_IRQ_DEV, \
.sdio_irq_gpio = CONFIG_MMC_##n##_GPIO_IRQ_NUM,\
.sdio_sdio_gpio_flags = CONFIG_MMC_##n##_GPIO_IRQ_FLAG,\
.use_irq_gpio = 1, \
)
/* No GPIO-based SDIO interrupt. */
#define gpio_irq_not(n) (\
.use_irq_gpio = 0, \
)
/*
 * Per-instance static configuration for controller SD<n>:
 * - forward-declares the device object so IRQ_CONNECT can reference it;
 * - defines the IRQ wiring function (connect + enable IRQ_ID_SD<n>);
 * - defines the read-only acts_mmc_config, with the DMA and GPIO-IRQ
 *   fields filled in by the COND_CODE_1-selected fragments above.
 */
#define MMC_ACTS_DEFINE_CONFIG(n) \
static const struct device DEVICE_NAME_GET(mmc##n##_acts); \
static void mmc##n##_acts_irq_config(void) \
{ \
IRQ_CONNECT(IRQ_ID_SD##n, CONFIG_MMC_##n##_IRQ_PRI, \
mmc_acts_isr, \
DEVICE_GET(mmc##n##_acts), 0); \
irq_enable(IRQ_ID_SD##n); \
} \
static const struct acts_mmc_config mmc_acts_config_##n = { \
.base = (struct acts_mmc_controller *)SD##n##_REG_BASE,\
.irq_config_func = mmc##n##_acts_irq_config, \
.clock_id = CLOCK_ID_SD##n,\
.reset_id = RESET_ID_SD##n,\
.clk_sel = CONFIG_MMC_##n##_CLKSEL,\
.bus_width = CONFIG_MMC_##n##_BUS_WIDTH,\
.data_reg_width = CONFIG_MMC_##n##_DATA_REG_WIDTH,\
COND_CODE_1(CONFIG_MMC_##n##_USE_DMA, dma_use(n), dma_not(n))\
COND_CODE_1(CONFIG_MMC_##n##_USE_GPIO_IRQ, gpio_irq_use(n), gpio_irq_not(n))\
}
/* NOTE(review): stray "irq-gpios" marker — presumably a reminder about the
 * devicetree irq-gpios property; confirm intent or remove. */
/*
 * Instantiate controller <n>: static config + zero-initialized runtime data,
 * registered at POST_KERNEL with init priority 20 (magic number — presumably
 * chosen to run after the clock/DMA/GPIO drivers this driver binds to;
 * verify against the project's priority scheme).
 */
#define MMC_ACTS_DEVICE_INIT(n) \
MMC_ACTS_DEFINE_CONFIG(n); \
static struct acts_mmc_data mmc_acts_dev_data_##n ; \
DEVICE_DEFINE(mmc##n##_acts, \
CONFIG_MMC_##n##_NAME, \
&mmc_acts_init, mmc_acts_pm_control, &mmc_acts_dev_data_##n, \
&mmc_acts_config_##n, POST_KERNEL, \
20, &mmc_acts_driver_api);
/* Create one driver instance per controller enabled in Kconfig. */
#if IS_ENABLED(CONFIG_MMC_0)
MMC_ACTS_DEVICE_INIT(0)
#endif
#if IS_ENABLED(CONFIG_MMC_1)
MMC_ACTS_DEVICE_INIT(1)
#endif
  940. #endif //#if IS_ENABLED(CONFIG_MMC_0)||IS_ENABLED(CONFIG_MMC_1)