pdma_acts.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545
  1. /*
  2. * Copyright (c) 2018 Google LLC.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <device.h>
  7. #include <soc.h>
  8. #include <drivers/dma.h>
  9. #include <board_cfg.h>
  10. #include <logging/log.h>
  11. LOG_MODULE_REGISTER(dma_acts, CONFIG_DMA_LOG_LEVEL);
#define DMA_INVALID_CHAN (0xff)	/* "allocate any free channel" marker */
#define DMA_ID_MEM 0		/* DMA slot id meaning plain memory access */
#define MAX_DMA_CH CONFIG_DMA_0_PCHAN_NUM
/* Per-channel register bank: channel N lives at base + (N+1) * 0x100 */
#define DMA_CHAN(base, ch) ((struct acts_dma_chan_reg*)((base) + (ch+1) * 0x100))
/* Maximum data sent in single transfer (Bytes) */
#define DMA_ACTS_MAX_DATA_ITEMS 0x7ffff
/* DMA CTL register fields */
#define DMA_CTL_SRC_TYPE_SHIFT 0
#define DMA_CTL_SRC_TYPE(x) ((x) << DMA_CTL_SRC_TYPE_SHIFT)
#define DMA_CTL_SRC_TYPE_MASK DMA_CTL_SRC_TYPE(0x3f)
#define DMA_CTL_SAM_CONSTANT (0x1 << 7)		/* source address stays constant */
#define DMA_CTL_DST_TYPE_SHIFT 8
#define DMA_CTL_DST_TYPE(x) ((x) << DMA_CTL_DST_TYPE_SHIFT)
#define DMA_CTL_DST_TYPE_MASK DMA_CTL_DST_TYPE(0x3f)
#define DMA_CTL_DAM_CONSTANT (0x1 << 15)	/* destination address stays constant */
/* NOTE(review): "ADUDIO" is a typo of "AUDIO"; names kept unchanged because
 * other code may reference these macros.
 */
#define DMA_CTL_ADUDIO_TYPE_SHIFT 16
#define DMA_CTL_ADUDIO_TYPE(x) ((x) << DMA_CTL_ADUDIO_TYPE_SHIFT)
#define DMA_CTL_ADUDIO_TYPE_MASK DMA_CTL_ADUDIO_TYPE(0x1)
#define DMA_CTL_ADUDIO_TYPE_INTER DMA_CTL_ADUDIO_TYPE(0)	/* interleaved samples */
#define DMA_CTL_ADUDIO_TYPE_SEP DMA_CTL_ADUDIO_TYPE(1)		/* separated buffers */
#define DMA_CTL_TRM_SHIFT 17	/* transfer mode: burst-of-8 vs single */
#define DMA_CTL_TRM(x) ((x) << DMA_CTL_TRM_SHIFT)
#define DMA_CTL_TRM_MASK DMA_CTL_TRM(0x1)
#define DMA_CTL_TRM_BURST8 DMA_CTL_TRM(0)
#define DMA_CTL_TRM_SINGLE DMA_CTL_TRM(1)
#define DMA_CTL_RELOAD (0x1 << 18)	/* auto-reload (circular) mode */
#define DMA_CTL_TWS_SHIFT 20	/* transfer word size */
#define DMA_CTL_TWS(x) ((x) << DMA_CTL_TWS_SHIFT)
#define DMA_CTL_TWS_MASK DMA_CTL_TWS(0x3)
#define DMA_CTL_TWS_8BIT DMA_CTL_TWS(2)
#define DMA_CTL_TWS_16BIT DMA_CTL_TWS(1)
#define DMA_CTL_TWS_32BIT DMA_CTL_TWS(0)
/* DMA pending register: transfer-complete irq in bits 0..15,
 * half-complete irq in bits 16..31 */
#define DMA_PD_TCIP(ch) (1 << ch)
#define DMA_PD_HFIP(ch) (1 << (ch+16))
/* DMA irq enable register (same bit layout as the pending register) */
#define DMA_IE_TCIP(ch) (1 << ch)
#define DMA_IE_HFIP(ch) (1 << (ch+16))
#define DMA_START_START (0x1 << 0)	/* channel start bit */
/* Per-channel memory-mapped DMA registers (see DMA_CHAN() for layout) */
struct acts_dma_chan_reg {
	volatile uint32_t ctl;		/* control: DMA_CTL_* fields */
	volatile uint32_t start;	/* start/busy: DMA_START_START */
	volatile uint32_t saddr0;	/* source address 0 */
	volatile uint32_t saddr1;	/* source address 1 (separated-audio mode) */
	volatile uint32_t daddr0;	/* destination address 0 */
	volatile uint32_t daddr1;	/* destination address 1 (separated-audio mode) */
	volatile uint32_t bc;		/* byte count to transfer */
	volatile uint32_t rc;		/* remaining byte count */
};
/* Global DMA controller registers at the controller base address */
struct acts_dma_reg {
	volatile uint32_t dma_ip;	/* irq pending (DMA_PD_*) */
	volatile uint32_t dma_ie;	/* irq enable (DMA_IE_*) */
};
/* Software bookkeeping for one physical DMA channel */
struct dma_acts_channel {
	dma_callback_t cb;	/* user callback, NULL when none requested */
	void *cb_arg;		/* user_data passed back to cb */
	uint16_t reload_count;	/* completed wraps while in reload mode */
	uint16_t complete_callback_en : 1;	/* invoke cb on completion irq */
	uint16_t hcom_callback_en : 1;	/* reload mode: also use half-complete irq */
	uint16_t channel_direction : 3;	/* cached dma_config.channel_direction */
	uint16_t busy : 1;	/* channel claimed via dma_acts_request() */
	uint16_t reserved : 10;
};
/* Driver instance state */
struct dma_acts_data {
	uint32_t base;		/* controller register base address */
	int chan_num;		/* number of physical channels */
	struct dma_acts_channel channels[MAX_DMA_CH];
};
/* Fetch driver data from a device handle */
#define DEV_DATA(dev) \
	((struct dma_acts_data *const)(dev)->data)
static struct dma_acts_data dmac_data;
DEVICE_DECLARE(dma_acts_0);
#if defined(CONFIG_DMA_DBG_DUMP)
/* Log the register bank of one channel (debug aid) */
static void dma_acts_dump_reg(struct dma_acts_data *ddev, uint32_t id)
{
	struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, id);

	LOG_INF("Using channel: %d", id);
	LOG_INF(" DMA_CTL: 0x%x", cregs->ctl);
	LOG_INF(" DMA_SADDR0: 0x%x", cregs->saddr0);
	LOG_INF(" DMA_SADDR1: 0x%x", cregs->saddr1);
	LOG_INF(" DMA_DADDR0: 0x%x", cregs->daddr0);
	LOG_INF(" DMA_DADDR1: 0x%x", cregs->daddr1);
	LOG_INF(" DMA_LEN: 0x%x", cregs->bc);
	LOG_INF(" DMA_RMAIN_LEN: 0x%x", cregs->rc);
}
  98. void dma_dump_info(void)
  99. {
  100. int i;
  101. struct dma_acts_channel *pchan;
  102. struct dma_acts_data *ddev = &dmac_data;
  103. LOG_INF("----pchan= %d stauts:--------\n", ddev->chan_num);
  104. for(i= 0; i < ddev->chan_num; i++){
  105. pchan = &ddev->channels[i];
  106. printk("chan%d: isbusy=%d\n", i, pchan->busy);
  107. dma_acts_dump_reg(ddev, i);
  108. }
  109. }
  110. #endif
/* Handles DMA interrupts and dispatches to the individual channel.
 *
 * Each physical channel has its own IRQ line; DMA_ACTS_IRQ_CONNECT()
 * passes the channel id as the (void *) isr argument.
 */
static void dma_acts_isr(void *arg)
{
	uint32_t id = (uint32_t) arg;	/* channel id encoded in the arg pointer */
	const struct device *dev = DEVICE_GET(dma_acts_0);
	struct dma_acts_data *ddev = &dmac_data;
	struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, id);
	struct acts_dma_reg *gregs = (struct acts_dma_reg *)ddev->base;
	struct dma_acts_channel *chan = &ddev->channels[id];
	uint32_t hf_pending, tc_pending;

	if (id >= ddev->chan_num)
		return;
	/* A pending bit counts only when it is both raised and irq-enabled */
	hf_pending = DMA_PD_HFIP(id) &
		gregs->dma_ip & gregs->dma_ie;
	tc_pending = DMA_PD_TCIP(id) &
		gregs->dma_ip & gregs->dma_ie;
	/* clear pending (write-back of the handled bits; presumably
	 * write-1-to-clear semantics — TODO confirm against datasheet) */
	gregs->dma_ip = tc_pending | hf_pending;
	if ((tc_pending | hf_pending) == 0)
		return;
	/* process full complete callback; the status argument is 1 when the
	 * half-complete irq fired, 0 for full completion */
	if (chan->complete_callback_en && chan->cb) {
		chan->cb(dev, chan->cb_arg, id, !!hf_pending);
	}
	/* reload (circular) mode: count completed wraps */
	if (cregs->ctl & DMA_CTL_RELOAD)
		chan->reload_count++;
}
  138. /* Configure a channel */
  139. static int dma_acts_config(const struct device *dev, uint32_t channel,
  140. struct dma_config *config)
  141. {
  142. struct dma_acts_data *ddev = DEV_DATA(dev);
  143. struct dma_acts_channel *chan = &ddev->channels[channel];
  144. struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, channel);
  145. struct dma_block_config *head_block = config->head_block;
  146. uint32_t ctl;
  147. int data_width = 0;
  148. if (channel >= ddev->chan_num) {
  149. LOG_ERR("DMA error:ch=%d > dma max chan=%d\n",channel,
  150. ddev->chan_num);
  151. return -EINVAL;
  152. }
  153. if (head_block->block_size > DMA_ACTS_MAX_DATA_ITEMS) {
  154. LOG_ERR("DMA error: Data size too big: %d",
  155. head_block->block_size);
  156. return -EINVAL;
  157. }
  158. if (config->complete_callback_en || config->error_callback_en) {
  159. chan->cb = config->dma_callback;
  160. chan->cb_arg = config->user_data;
  161. chan->complete_callback_en = config->complete_callback_en;
  162. } else {
  163. chan->cb = NULL;
  164. chan->complete_callback_en = 0;
  165. }
  166. chan->hcom_callback_en = 0;
  167. cregs->saddr0 = (uint32_t)head_block->source_address;
  168. cregs->daddr0 = (uint32_t)head_block->dest_address;
  169. cregs->bc = (uint32_t)head_block->block_size;
  170. chan->channel_direction = config->channel_direction;
  171. chan->reload_count = 0;
  172. if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
  173. ctl = DMA_CTL_SRC_TYPE(DMA_ID_MEM) |
  174. DMA_CTL_DST_TYPE(config->dma_slot) |
  175. DMA_CTL_DAM_CONSTANT;
  176. } else if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
  177. ctl = DMA_CTL_SRC_TYPE(config->dma_slot) |
  178. DMA_CTL_SAM_CONSTANT |
  179. DMA_CTL_DST_TYPE(DMA_ID_MEM);
  180. } else {
  181. ctl = DMA_CTL_SRC_TYPE(DMA_ID_MEM) |
  182. DMA_CTL_DST_TYPE(DMA_ID_MEM);
  183. }
  184. /** extern for actions dma interleaved mode */
  185. if (config->reserved == 1 && config->channel_direction == MEMORY_TO_PERIPHERAL) {
  186. ctl |= DMA_CTL_ADUDIO_TYPE_SEP;
  187. }else if(config->reserved == 1 && config->channel_direction == PERIPHERAL_TO_MEMORY) {
  188. ctl |= DMA_CTL_ADUDIO_TYPE_SEP;
  189. }
  190. if (config->source_burst_length == 1 || config->dest_burst_length == 1) {
  191. ctl |= DMA_CTL_TRM_SINGLE;
  192. }
  193. if (config->source_data_size) {
  194. data_width = config->source_data_size;
  195. }
  196. if (config->dest_data_size) {
  197. data_width = config->dest_data_size;
  198. }
  199. if (head_block->source_reload_en || head_block->dest_reload_en) {
  200. ctl |= DMA_CTL_RELOAD;
  201. chan->hcom_callback_en = 1;
  202. }
  203. switch (data_width) {
  204. case 2:
  205. ctl |= DMA_CTL_TWS_16BIT;
  206. break;
  207. case 4:
  208. ctl |= DMA_CTL_TWS_32BIT;
  209. break;
  210. case 1:
  211. default:
  212. ctl |= DMA_CTL_TWS_8BIT;
  213. break;
  214. }
  215. cregs->ctl = ctl;
  216. return 0;
  217. }
/* Start a previously configured transfer on @channel.
 *
 * Clears stale pending bits, enables the interrupts that were requested at
 * config time, programs the secondary address for separated-audio mode,
 * then sets the START bit — all under irq_lock so the sequence is atomic.
 *
 * @return 0 on success, -EINVAL for a bad channel.
 */
static int dma_acts_start(const struct device *dev, uint32_t channel)
{
	struct dma_acts_data *ddev = DEV_DATA(dev);
	struct dma_acts_channel *chan = &ddev->channels[channel];
	struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, channel);
	struct acts_dma_reg *gregs = (struct acts_dma_reg *)ddev->base;
	uint32_t key;

	if (channel >= ddev->chan_num) {
		return -EINVAL;
	}
	key = irq_lock();
	/* clear old irq pending */
	gregs->dma_ip = DMA_PD_TCIP(channel) | DMA_PD_HFIP(channel);
	gregs->dma_ie &= ~(DMA_IE_TCIP(channel) | DMA_IE_HFIP(channel));
	/* enable dma channel full complete irq? */
	if (chan->complete_callback_en) {
		gregs->dma_ie |= DMA_IE_TCIP(channel);
		/* DMA_CTL_RELOAD mode also uses the half complete irq */
		if (chan->hcom_callback_en)
			gregs->dma_ie |= DMA_IE_HFIP(channel);
	}
	/* set memory type such as interleaved or separated for audio;
	 * DST_TYPE == 0 (DMA_ID_MEM) means the destination is memory,
	 * i.e. a PERIPHERAL_TO_MEMORY transfer */
	if (cregs->ctl & DMA_CTL_ADUDIO_TYPE_SEP) {
		if ((cregs->ctl & DMA_CTL_DST_TYPE_MASK) == 0) {
			/* PERIPHERAL_TO_MEMORY
			 * NOTE(review): this mirrors the SOURCE address into
			 * daddr1 (and vice versa below) — looks inverted for a
			 * "second buffer" register; confirm against datasheet */
			cregs->daddr1 = cregs->saddr0;
		} else {
			/* MEMORY_TO_PERIPHERAL */
			cregs->saddr1 = cregs->daddr0;
		}
	}
	/* start dma transfer */
	cregs->start |= DMA_START_START;
	irq_unlock(key);
	return 0;
}
/* Stop @channel: mask and clear its interrupts, drop reload mode, then
 * clear the START bit, all under irq_lock.
 *
 * @return 0 on success, -EINVAL for a bad channel.
 */
static int dma_acts_stop(const struct device *dev, uint32_t channel)
{
	struct dma_acts_data *ddev = DEV_DATA(dev);
	struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, channel);
	struct acts_dma_reg *gregs = (struct acts_dma_reg *)ddev->base;
	uint32_t key;

	if (channel >= ddev->chan_num) {
		return -EINVAL;
	}
	key = irq_lock();
	gregs->dma_ie &= ~(DMA_IE_TCIP(channel) | DMA_IE_HFIP(channel));
	/* clear old irq pending */
	gregs->dma_ip = DMA_PD_TCIP(channel) | DMA_PD_HFIP(channel);
	/* disable reload before stopping dma so the channel does not restart */
	cregs->ctl &= ~DMA_CTL_RELOAD;
	cregs->start &= ~DMA_START_START;
	irq_unlock(key);
	return 0;
}
  273. static int dma_acts_reload(const struct device *dev, uint32_t channel,
  274. uint32_t src, uint32_t dst, size_t size)
  275. {
  276. struct dma_acts_data *ddev = DEV_DATA(dev);
  277. struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, channel);
  278. uint32_t key;
  279. if (channel >= ddev->chan_num) {
  280. return -EINVAL;
  281. }
  282. key = irq_lock();
  283. cregs->saddr0 = src;
  284. cregs->daddr0 = dst;
  285. cregs->bc = size;
  286. irq_unlock(key);
  287. return 0;
  288. }
  289. static int dma_acts_get_status(const struct device *dev, uint32_t channel,
  290. struct dma_status *stat)
  291. {
  292. struct dma_acts_data *ddev = DEV_DATA(dev);
  293. struct acts_dma_chan_reg *cregs = DMA_CHAN(ddev->base, channel);
  294. struct dma_acts_channel *chan = &ddev->channels[channel];
  295. if (channel >= ddev->chan_num || stat == NULL) {
  296. return -EINVAL;
  297. }
  298. if (cregs->start) {
  299. stat->busy = true;
  300. stat->pending_length = cregs->rc;
  301. } else {
  302. stat->busy = false;
  303. stat->pending_length = 0;
  304. }
  305. stat->dir = chan->channel_direction;
  306. return 0;
  307. }
  308. static int dma_acts_request(const struct device *dev, uint32_t channel)
  309. {
  310. struct dma_acts_data *ddev = DEV_DATA(dev);
  311. int i;
  312. uint32_t key;
  313. int ret = -EINVAL;
  314. //printk("-------requset:dma chan%d \n", channel);
  315. if (channel != DMA_INVALID_CHAN) {
  316. if(channel >= ddev->chan_num){
  317. printk("request chan=%d max err\n", channel);
  318. return -EINVAL;
  319. }
  320. key = irq_lock();
  321. if(ddev->channels[channel].busy){
  322. printk("request chan id%d already used\n", channel);
  323. ret = -EINVAL;
  324. }else{
  325. ret = channel;
  326. ddev->channels[channel].busy = 1;
  327. }
  328. irq_unlock(key);
  329. }else{
  330. key = irq_lock();
  331. for(i = ddev->chan_num-1; i >= 0; i--){
  332. if(!ddev->channels[i].busy)
  333. break;
  334. }
  335. if(i >= 0){
  336. ret = i;
  337. ddev->channels[i].busy = 1;
  338. }
  339. irq_unlock(key);
  340. }
  341. //printk("--- alloc dma chan%d \n", ret);
  342. return ret;
  343. }
  344. static void dma_acts_free(const struct device *dev, uint32_t channel)
  345. {
  346. struct dma_acts_data *ddev = DEV_DATA(dev);
  347. uint32_t key;
  348. key = irq_lock();
  349. if(!ddev->channels[channel].busy){
  350. printk("err:dma chan%d is free\n", channel);
  351. }else{
  352. ddev->channels[channel].busy = 0;
  353. }
  354. irq_unlock(key);
  355. }
/* Connect and enable the per-channel IRQ line for physical channel n */
#define DMA_ACTS_IRQ_CONNECT(n) \
	do { \
		IRQ_CONNECT((IRQ_ID_DMA0+n), \
			CONFIG_DMA_IRQ_PRI, \
			dma_acts_isr, n, 0); \
		irq_enable((IRQ_ID_DMA0+n)); \
	} while (0)
/* True when chan is not one of the channels reserved for LCD / SPINAND /
 * SPINOR exclusive use ("RESEVER" is a typo of "RESERVE" in the Kconfig
 * symbol names, kept as-is) */
#define DMA_NOT_RESERVE(chan) ((CONFIG_DMA_LCD_RESEVER_CHAN!=chan) \
	&& (CONFIG_DMA_SPINAND_RESEVER_CHAN!=chan) \
	&& (CONFIG_DMA_SPINOR_RESEVER_CHAN!=chan))
/* One-time controller init: enable the DMA clock, reset the block, hook
 * the per-channel IRQs (skipping reserved channels, which are driven
 * elsewhere), and pre-mark the reserved channels busy so the allocator
 * never hands them out.
 */
static int dma_acts_init(const struct device *dev)
{
	struct dma_acts_data *data = DEV_DATA(dev);

	data->base = DMA_REG_BASE;
	acts_clock_peripheral_enable(CLOCK_ID_DMA);
	acts_reset_peripheral(RESET_ID_DMA);
	data->chan_num = MAX_DMA_CH;
/* IRQ_CONNECT requires compile-time constants, hence this unrolled
 * preprocessor chain: connect channel n only when it exists and is not
 * reserved for LCD/SPINAND/SPINOR. */
#if MAX_DMA_CH > 0
#if DMA_NOT_RESERVE(0)
	DMA_ACTS_IRQ_CONNECT(0);
#endif
#endif
#if MAX_DMA_CH > 1
#if DMA_NOT_RESERVE(1)
	DMA_ACTS_IRQ_CONNECT(1);
#endif
#endif
#if MAX_DMA_CH > 2
#if DMA_NOT_RESERVE(2)
	DMA_ACTS_IRQ_CONNECT(2);
#endif
#endif
#if MAX_DMA_CH > 3
#if DMA_NOT_RESERVE(3)
	DMA_ACTS_IRQ_CONNECT(3);
#endif
#endif
#if MAX_DMA_CH > 4
#if DMA_NOT_RESERVE(4)
	DMA_ACTS_IRQ_CONNECT(4);
#endif
#endif
#if MAX_DMA_CH > 5
#if DMA_NOT_RESERVE(5)
	DMA_ACTS_IRQ_CONNECT(5);
#endif
#endif
#if MAX_DMA_CH > 6
#if DMA_NOT_RESERVE(6)
	DMA_ACTS_IRQ_CONNECT(6);
#endif
#endif
#if MAX_DMA_CH > 7
#if DMA_NOT_RESERVE(7)
	DMA_ACTS_IRQ_CONNECT(7);
#endif
#endif
#if MAX_DMA_CH > 8
#if DMA_NOT_RESERVE(8)
	DMA_ACTS_IRQ_CONNECT(8);
#endif
#endif
#if MAX_DMA_CH > 9
#if DMA_NOT_RESERVE(9)
	DMA_ACTS_IRQ_CONNECT(9);
#endif
#endif
	printk("dma-num=%d\n", data->chan_num);
/* Reserved channels are claimed up front so dma_acts_request() skips them */
#if CONFIG_DMA_LCD_RESEVER_CHAN < MAX_DMA_CH
	data->channels[CONFIG_DMA_LCD_RESEVER_CHAN].busy = 1; //reserve for LCD
#endif
#if CONFIG_DMA_SPINAND_RESEVER_CHAN < MAX_DMA_CH
	data->channels[CONFIG_DMA_SPINAND_RESEVER_CHAN].busy = 1; //reserve for SPINAND
#endif
#if CONFIG_DMA_SPINOR_RESEVER_CHAN < MAX_DMA_CH
	data->channels[CONFIG_DMA_SPINOR_RESEVER_CHAN].busy = 1; // reserve for SPINOR
#endif
	return 0;
}
/* Zephyr DMA driver API vtable */
static const struct dma_driver_api dma_acts_api = {
	.config = dma_acts_config,
	.start = dma_acts_start,
	.stop = dma_acts_stop,
	.reload = dma_acts_reload,
	.get_status = dma_acts_get_status,
	.request = dma_acts_request,
	.free = dma_acts_free,
};
/* Single controller instance, initialized at POST_KERNEL priority 1 */
DEVICE_DEFINE(dma_acts_0, CONFIG_DMA_0_NAME, &dma_acts_init, NULL,
	      &dmac_data, NULL, POST_KERNEL,
	      1, &dma_acts_api);