/* vdma_acts.c — Actions SoC DMA driver with virtual-channel (vchan) scheduling */
  1. /*
  2. * Copyright (c) 2018 Google LLC.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <device.h>
  7. #include <soc.h>
  8. #include <drivers/dma.h>
  9. #include "vdma_list.h"
  10. #include <board_cfg.h>
  11. #include <logging/log.h>
  12. LOG_MODULE_REGISTER(vdma_acts, CONFIG_DMA_LOG_LEVEL);
  13. #define DMA_INVALID_CHAN (0xff)
  14. #define DMA_VCHAN_START_ID 32
  15. #define DMA_VCHAN_MAX_NUM CONFIG_DMA_0_VCHAN_NUM
  16. #define DMA_VCHAN_PCHAN_NUM CONFIG_DMA_0_VCHAN_PCHAN_NUM
  17. #define DMA_VCHAN_PCHAN_START CONFIG_DMA_0_VCHAN_PCHAN_START
  18. #define DMA_VCHAN_PCHAN_END (DMA_VCHAN_PCHAN_NUM + DMA_VCHAN_PCHAN_START)
  19. #define DMA_PCHAN_IS_VCHAN(id) (((id >= DMA_VCHAN_PCHAN_START) && (id < DMA_VCHAN_PCHAN_END))?1:0)
  20. #define DMA_ID_MEM 0
  21. #define MAX_DMA_CH CONFIG_DMA_0_PCHAN_NUM
  22. #define DMA_CHAN(base, ch) ((struct dma_regs*)((base) + (ch+1) * 0x100))
  23. /* Maximum data sent in single transfer (Bytes) */
  24. #define DMA_ACTS_MAX_DATA_ITEMS 0x7ffff
  25. /*dma ctl register*/
  26. #define DMA_CTL_SRC_TYPE_SHIFT 0
  27. #define DMA_CTL_SRC_TYPE(x) ((x) << DMA_CTL_SRC_TYPE_SHIFT)
  28. #define DMA_CTL_SRC_TYPE_MASK DMA_CTL_SRC_TYPE(0x3f)
  29. #define DMA_CTL_SAM_CONSTANT (0x1 << 7)
  30. #define DMA_CTL_DST_TYPE_SHIFT 8
  31. #define DMA_CTL_DST_TYPE(x) ((x) << DMA_CTL_DST_TYPE_SHIFT)
  32. #define DMA_CTL_DST_TYPE_MASK DMA_CTL_DST_TYPE(0x3f)
  33. #define DMA_CTL_DAM_CONSTANT (0x1 << 15)
  34. #define DMA_CTL_ADUDIO_TYPE_SHIFT 16
  35. #define DMA_CTL_ADUDIO_TYPE(x) ((x) << DMA_CTL_ADUDIO_TYPE_SHIFT)
  36. #define DMA_CTL_ADUDIO_TYPE_MASK DMA_CTL_ADUDIO_TYPE(0x1)
  37. #define DMA_CTL_ADUDIO_TYPE_INTER DMA_CTL_ADUDIO_TYPE(0)
  38. #define DMA_CTL_ADUDIO_TYPE_SEP DMA_CTL_ADUDIO_TYPE(1)
  39. #define DMA_CTL_TRM_SHIFT 17
  40. #define DMA_CTL_TRM(x) ((x) << DMA_CTL_TRM_SHIFT)
  41. #define DMA_CTL_TRM_MASK DMA_CTL_TRM(0x1)
  42. #define DMA_CTL_TRM_BURST8 DMA_CTL_TRM(0)
  43. #define DMA_CTL_TRM_SINGLE DMA_CTL_TRM(1)
  44. #define DMA_CTL_RELOAD (0x1 << 18)
  45. #define DMA_CTL_TWS_SHIFT 20
  46. #define DMA_CTL_TWS(x) ((x) << DMA_CTL_TWS_SHIFT)
  47. #define DMA_CTL_TWS_MASK DMA_CTL_TWS(0x3)
  48. #define DMA_CTL_TWS_8BIT DMA_CTL_TWS(2)
  49. #define DMA_CTL_TWS_16BIT DMA_CTL_TWS(1)
  50. #define DMA_CTL_TWS_32BIT DMA_CTL_TWS(0)
  51. /*dma pending register*/
  52. #define DMA_PD_TCIP(ch) (1 << ch)
  53. #define DMA_PD_HFIP(ch) (1 << (ch+16))
  54. /*dma irq enable register*/
  55. #define DMA_IE_TCIP(ch) (1 << ch)
  56. #define DMA_IE_HFIP(ch) (1 << (ch+16))
  57. #define DMA_START_START (0x1 << 0)
  58. #if 0
  59. #undef LOG_ERR
  60. #undef LOG_DBG
  61. #undef LOG_INF
  62. #define LOG_ERR printk
  63. #define LOG_INF printk
  64. #define LOG_DBG printk
  65. #endif
  66. /* dma channel registers */
/* Per-channel DMA register bank; one bank every 0x100 bytes (see DMA_CHAN). */
struct dma_regs {
	volatile uint32_t ctl;    /* channel control (types, width, reload, ...) */
	volatile uint32_t start;  /* bit 0: start/stop transfer */
	volatile uint32_t saddr0; /* source address 0 */
	volatile uint32_t saddr1; /* source address 1 (separated-audio mode) */
	volatile uint32_t daddr0; /* destination address 0 */
	volatile uint32_t daddr1; /* destination address 1 (separated-audio mode) */
	volatile uint32_t bc;     /* byte count (transfer length) */
	volatile uint32_t rc;     /* remaining byte count */
};
/* Controller-global registers at the DMA base address. */
struct dma_reg_g {
	volatile uint32_t dma_ip; /* interrupt pending; write 1 to clear a bit */
	volatile uint32_t dma_ie; /* interrupt enable mask */
};
/* Run-time state of one physical DMA channel (also embedded per-vchan). */
struct dma_pchan {
	dma_callback_t cb;                /* user completion callback */
	void *cb_arg;                     /* opaque user data passed to cb */
	unsigned short reload_count;      /* completions seen while in reload mode */
	uint8_t pchan_id; //init val, not change
	uint8_t vchan_id;                 /* bound vchan id, or DMA_INVALID_CHAN */
	uint8_t complete_callback_en : 1; /* call cb on full-complete irq */
	uint8_t hcom_callback_en : 1;     /* enable half-complete irq (reload mode) */
	uint8_t channel_direction : 3;    /* cached dma_config channel_direction */
	uint8_t busy : 1; //pchan is for requst, vchan is for used
	uint8_t isvchan : 1;              /* pchan belongs to the vchan scheduler pool */
	uint8_t reserved : 1;
};
/* A virtual channel: shadow register set plus scheduling state. */
struct dma_vchan {
	struct list_head list;     /* node on the pending-request queue */
	struct dma_regs dma_regs;  /* shadow registers loaded into a pchan on start */
	struct dma_pchan pchan;    /* per-vchan callback/flag state */
	unsigned char vchan_id; //init val, not change
	unsigned char pchan_id;// =DMA_INVALID_CHAN is not transfer ,else is transfer
	unsigned char busy; //for request
};
/* Driver instance data for the single DMA controller. */
struct vdma_acts_data {
	uint32_t base;                     /* controller register base address */
	uint8_t pchan_num;                 /* number of physical channels */
	uint8_t vchan_num;                 /* number of virtual channels */
	struct dma_pchan pchan[MAX_DMA_CH]; /* physical channel state table */
	struct list_head dma_req_list;     /* vchans waiting for a free pchan */
};
  109. #define DEV_DATA(dev) \
  110. ((struct vdma_acts_data *const)(dev)->data)
  111. static struct vdma_acts_data vdmac_data;
  112. static struct dma_vchan dma_vchan_data[DMA_VCHAN_MAX_NUM];
  113. DEVICE_DECLARE(vdma_acts_0);
  114. #if defined(CONFIG_DMA_DBG_DUMP)
/* Print the hardware register bank of physical channel @id (debug only). */
static void dma_acts_dump_reg(struct vdma_acts_data *ddev, uint32_t id)
{
	struct dma_regs *cregs = DMA_CHAN(ddev->base, id);

	printk("Using channel: %d \n", id);
	printk(" DMA_CTL: 0x%x \n", cregs->ctl);
	printk(" DMA_SADDR0: 0x%x \n", cregs->saddr0);
	printk(" DMA_SADDR1: 0x%x \n", cregs->saddr1);
	printk(" DMA_DADDR0: 0x%x \n", cregs->daddr0);
	printk(" DMA_DADDR1: 0x%x \n", cregs->daddr1);
	printk(" DMA_LEN: 0x%x \n", cregs->bc);
	printk(" DMA_RMAIN_LEN: 0x%x \n", cregs->rc);
}
/* Dump the request queue plus every vchan and pchan (with registers).
 * Debug aid, compiled only under CONFIG_DMA_DBG_DUMP.
 */
void dma_dump_info(void)
{
	int i;
	struct dma_vchan *vchan;
	struct dma_pchan *pchan;
	struct vdma_acts_data *ddev = &vdmac_data;

	/* vchans still queued, waiting for a free physical channel */
	list_for_each_entry(vchan, &ddev->dma_req_list, list) {
		LOG_INF("vchan=%d, in req list\n", vchan->vchan_id);
	}
	LOG_INF("----vchan = %d stauts:--------\n", DMA_VCHAN_MAX_NUM);
	for(i = 0; i < DMA_VCHAN_MAX_NUM; i++) {
		vchan = &dma_vchan_data[i];
		printk("%d:vid=%d, busy=%d,start=%d, pch=%d\n",
			i, vchan->vchan_id, vchan->busy, vchan->dma_regs.start,
			vchan->pchan_id);
	}
	LOG_INF("----pchan= %d stauts:--------\n", ddev->pchan_num);
	for(i= 0; i < ddev->pchan_num; i++){
		pchan = &ddev->pchan[i];
		printk("%d:pid=%d, busy=%d,vid=%d, isv=%d:\n",
			i, pchan->pchan_id, pchan->busy, pchan->vchan_id,
			pchan->isvchan);
		dma_acts_dump_reg(ddev, i);
	}
}
  152. #endif
  153. static void vdma_vchan_start_tran(struct dma_vchan *vchan, struct dma_pchan *chan)
  154. {
  155. struct vdma_acts_data *ddev = &vdmac_data;
  156. struct dma_regs *cregs = DMA_CHAN(ddev->base, chan->pchan_id);
  157. struct dma_regs *dma_reg = &vchan->dma_regs;
  158. struct dma_reg_g *gregs = (struct dma_reg_g *)ddev->base;
  159. chan->busy = 1;
  160. chan->vchan_id = vchan->vchan_id;
  161. vchan->pchan_id = chan->pchan_id;
  162. chan->channel_direction = vchan->pchan.channel_direction;
  163. cregs->saddr0 = dma_reg->saddr0;
  164. cregs->daddr0 = dma_reg->daddr0;
  165. cregs->saddr1 = dma_reg->saddr0;
  166. cregs->daddr1 = dma_reg->daddr1;
  167. cregs->bc = dma_reg->bc;
  168. cregs->ctl = dma_reg->ctl;
  169. /* clear old irq pending */
  170. gregs->dma_ip = DMA_PD_TCIP(chan->pchan_id);
  171. gregs->dma_ie &= ~( DMA_IE_TCIP(chan->pchan_id) | DMA_IE_HFIP(chan->pchan_id));
  172. gregs->dma_ie |= DMA_IE_TCIP(chan->pchan_id);
  173. if(vchan->pchan.hcom_callback_en)
  174. gregs->dma_ie |= DMA_IE_HFIP(chan->pchan_id);
  175. /* start dma transfer */
  176. cregs->start |= DMA_START_START;
  177. }
/* Release physical channel @chan and, if any vchan is queued waiting
 * for hardware, immediately hand the pchan to the head of the request
 * list. Caller must hold irq_lock (NOTE(review): inferred from call
 * sites — confirm).
 */
static void vdma_vchan_free_pchan(struct vdma_acts_data *ddev, struct dma_pchan *chan)
{
	struct dma_vchan *vchan;

	chan->busy = 0;
	chan->vchan_id = DMA_INVALID_CHAN;
	/*start next vchan transfer*/
	if(!list_empty(&ddev->dma_req_list)) {
		vchan = (struct dma_vchan *) list_first_entry(&ddev->dma_req_list, struct dma_vchan, list);
		list_del(&vchan->list);
		vdma_vchan_start_tran(vchan, chan);
	}
}
/* Handles DMA interrupts and dispatches to the individual channel */
static void vdma_acts_isr(void *arg)
{
	uint32_t id = (uint32_t) arg; /* irq argument is the pchan index */
	const struct device *dev = DEVICE_GET(vdma_acts_0);
	struct vdma_acts_data *ddev = &vdmac_data;
	struct dma_regs *cregs = DMA_CHAN(ddev->base, id);
	struct dma_reg_g *gregs = (struct dma_reg_g *)ddev->base;
	struct dma_pchan *chan = &ddev->pchan[id];
	struct dma_pchan *tmp;
	struct dma_vchan *vchan;
	unsigned int flags;
	uint32_t hf_pending, tc_pending;

	/* sanity: the pchan table entry must match this irq line */
	if(id != chan->pchan_id){
		printk("error: chan id=%d\n", chan->pchan_id);
		return;
	}
	if (id >= ddev->pchan_num)
		return;

	/* half- and full-complete status, masked by the enable bits */
	hf_pending = DMA_PD_HFIP(id) &
		gregs->dma_ip & gregs->dma_ie;
	tc_pending = DMA_PD_TCIP(id) &
		gregs->dma_ip & gregs->dma_ie;
	/* clear pending */
	gregs->dma_ip = tc_pending | hf_pending;
	if((tc_pending|hf_pending) == 0)
		return;

	/* process full complete callback */
	flags = irq_lock();
	if(chan->isvchan){
		if(chan->vchan_id < DMA_VCHAN_MAX_NUM &&
			dma_vchan_data[chan->vchan_id].busy) {
			vchan = &dma_vchan_data[chan->vchan_id];
			tmp = &vchan->pchan;
			if(tc_pending){
				if(cregs->ctl & DMA_CTL_RELOAD){
					/* reload mode: hardware restarts itself; count laps */
					tmp->reload_count++;
				}else{
					/* one-shot finished: shadow the final counters and
					 * pass the pchan on to the next queued vchan
					 */
					vchan->pchan_id = DMA_INVALID_CHAN;//finshed
					vchan->dma_regs.rc = cregs->rc;
					vchan->dma_regs.bc = cregs->bc;
					vchan->dma_regs.start = 0; // stop
					vdma_vchan_free_pchan(ddev,chan);
				}
			}
			/* report using the vchan id space the client sees */
			if(tmp->cb && tmp->complete_callback_en) {
				tmp->cb(dev, tmp->cb_arg, chan->vchan_id+DMA_VCHAN_START_ID,
					!!hf_pending);
			}
		}else{
			/* inconsistent binding: reclaim the pchan anyway */
			vdma_vchan_free_pchan(ddev,chan);
			printk("vchan irq error: pid=%d, vid=%d \n", id, chan->vchan_id);
		}
	}else{
		/* plain physical channel: deliver callback directly */
		if(chan->cb && chan->complete_callback_en) {
			chan->cb(dev, chan->cb_arg, id, !!hf_pending);
		}
		if(cregs->ctl & DMA_CTL_RELOAD)
			chan->reload_count++;
	}
	irq_unlock(flags);
}
  252. /* Configure a channel */
  253. static int vdma_acts_get_vchan(struct vdma_acts_data *ddev, uint32_t channel,
  254. struct dma_vchan **vchan)
  255. {
  256. struct dma_pchan *pchan;
  257. uint32_t ch;
  258. *vchan = NULL;
  259. if (channel >= DMA_VCHAN_START_ID) {
  260. if(channel >= DMA_VCHAN_START_ID+DMA_VCHAN_MAX_NUM) {
  261. LOG_ERR("VDMA error:ch=%d > dma max chan=%d\n", channel,
  262. DMA_VCHAN_START_ID+DMA_VCHAN_MAX_NUM);
  263. return -EINVAL;
  264. }
  265. ch = channel-DMA_VCHAN_START_ID;
  266. if(dma_vchan_data[ch].vchan_id != ch) {
  267. LOG_ERR("vchanid err:%d != %d\n", dma_vchan_data[ch].vchan_id, ch);
  268. return -EINVAL;
  269. }
  270. if(!dma_vchan_data[ch].busy) {
  271. LOG_ERR("vchanid err:%d is not request\n", ch);
  272. return -EINVAL;
  273. }
  274. *vchan = &dma_vchan_data[ch];
  275. }else{
  276. if(channel >= ddev->pchan_num) {
  277. LOG_ERR("PDMA error:ch=%d > dma max chan=%d\n", channel,
  278. ddev->pchan_num);
  279. return -EINVAL;
  280. }
  281. *vchan = NULL;
  282. pchan = &ddev->pchan[channel];
  283. if(pchan->pchan_id != channel) {
  284. LOG_ERR("err: pchan id :%d != %d\n",pchan->pchan_id, channel);
  285. return -EINVAL;
  286. }
  287. if(pchan->isvchan){
  288. LOG_ERR("error:pch=%d is vchan\n", channel);
  289. return -EINVAL;
  290. }
  291. if(!pchan->busy){
  292. LOG_ERR("error:pch=%d is not request\n", channel);
  293. return -EINVAL;
  294. }
  295. }
  296. return 0;
  297. }
/* Configure a channel. For a vchan the settings are written into its
 * shadow register set (loaded into hardware later by
 * vdma_vchan_start_tran); a pchan's hardware bank is written directly.
 */
static int vdma_acts_config(const struct device *dev, uint32_t channel,
		struct dma_config *config)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_regs *cregs;
	struct dma_vchan *vchan = NULL;
	struct dma_pchan *pchan;
	struct dma_block_config *head_block = config->head_block;
	uint32_t ctl;
	int data_width = 0;
	int ret;
	uint32_t key;

	/* hardware byte counter limit (DMA_ACTS_MAX_DATA_ITEMS) */
	if (head_block->block_size > DMA_ACTS_MAX_DATA_ITEMS) {
		LOG_ERR("DMA error: Data size too big: %d",
			head_block->block_size);
		return -EINVAL;
	}
	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err cfg\n");
		return ret;
	}
	key = irq_lock();
	if (vchan) {
		cregs = &vchan->dma_regs;  /* shadow registers */
		pchan = &vchan->pchan;
	}else{
		cregs = DMA_CHAN(ddev->base, channel);  /* live hardware bank */
		pchan = &ddev->pchan[channel];
	}
	/* NOTE(review): error_callback_en also installs the callback, but
	 * only complete_callback_en is latched — confirm intent.
	 */
	if (config->complete_callback_en || config->error_callback_en) {
		pchan->cb = config->dma_callback;
		pchan->cb_arg = config->user_data;
		pchan->complete_callback_en = config->complete_callback_en;
	} else {
		pchan->cb = NULL;
		pchan->complete_callback_en = 0;
	}
	pchan->hcom_callback_en = 0;
	cregs->saddr0 = (uint32_t)head_block->source_address;
	cregs->daddr0 = (uint32_t)head_block->dest_address;
	cregs->bc = (uint32_t)head_block->block_size;
	pchan->channel_direction = config->channel_direction;
	pchan->reload_count = 0;
	/* memory on one side, dma_slot selects the peripheral id on the
	 * other; the peripheral side uses constant (non-incrementing)
	 * addressing
	 */
	if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
		ctl = DMA_CTL_SRC_TYPE(DMA_ID_MEM) |
			DMA_CTL_DST_TYPE(config->dma_slot) |
			DMA_CTL_DAM_CONSTANT;
	} else if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
		ctl = DMA_CTL_SRC_TYPE(config->dma_slot) |
			DMA_CTL_SAM_CONSTANT |
			DMA_CTL_DST_TYPE(DMA_ID_MEM);
	} else {
		ctl = DMA_CTL_SRC_TYPE(DMA_ID_MEM) |
			DMA_CTL_DST_TYPE(DMA_ID_MEM);
	}
	/** extern for actions dma interleaved mode */
	/* config->reserved == 1 selects the vendor separated-audio
	 * addressing mode using the second address register
	 */
	if (config->reserved == 1 && config->channel_direction == MEMORY_TO_PERIPHERAL) {
		cregs->saddr1 = (uint32_t)head_block->source_address;
		ctl |= DMA_CTL_ADUDIO_TYPE_SEP;
	}else if(config->reserved == 1 && config->channel_direction == PERIPHERAL_TO_MEMORY) {
		cregs->daddr1 = (uint32_t)head_block->dest_address;
		ctl |= DMA_CTL_ADUDIO_TYPE_SEP;
	}
	/* burst length 1 on either side forces single-transfer mode */
	if (config->source_burst_length == 1 || config->dest_burst_length == 1) {
		ctl |= DMA_CTL_TRM_SINGLE;
	}
	/* dest_data_size wins when both sizes are given */
	if (config->source_data_size) {
		data_width = config->source_data_size;
	}
	if (config->dest_data_size) {
		data_width = config->dest_data_size;
	}
	/* reload (circular) mode also enables the half-complete irq */
	if (head_block->source_reload_en || head_block->dest_reload_en) {
		ctl |= DMA_CTL_RELOAD;
		pchan->hcom_callback_en = 1;
	}
	switch (data_width) {
	case 2:
		ctl |= DMA_CTL_TWS_16BIT;
		break;
	case 4:
		ctl |= DMA_CTL_TWS_32BIT;
		break;
	case 1:
	default:
		ctl |= DMA_CTL_TWS_8BIT;
		break;
	}
	cregs->ctl = ctl;
	irq_unlock(key);
	return 0;
}
  392. static struct dma_pchan * vdma_acts_get_free_chan(struct vdma_acts_data *ddev)
  393. {
  394. uint32_t i, ch;
  395. struct dma_pchan *pch;
  396. for(i = 0; i < DMA_VCHAN_PCHAN_NUM; i++){
  397. ch = i + DMA_VCHAN_PCHAN_START;
  398. pch = &ddev->pchan[ch];
  399. if((!pch->isvchan) || (pch->pchan_id !=ch)){
  400. LOG_ERR("vchan err:isv=%d, id=%d != %d\n",pch->isvchan, pch->pchan_id, ch);
  401. }
  402. if(!pch->busy)
  403. return pch;
  404. }
  405. return NULL;
  406. }
/* Start a transfer on @channel. A vchan grabs a free pool pchan if one
 * exists, otherwise it is queued and started later from the ISR; a
 * plain pchan kicks its hardware bank directly.
 */
static int vdma_acts_start(const struct device *dev, uint32_t channel)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan = NULL;
	struct dma_pchan *pchan, *tmp;
	struct dma_regs *cregs;
	struct dma_reg_g *gregs = (struct dma_reg_g *)ddev->base;
	uint32_t key;
	int ret;

	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err start\n");
		return ret;
	}
	key = irq_lock();
	if(vchan) {
		cregs = &vchan->dma_regs;
		/* ignore a second start while already started/queued */
		if(!cregs->start) {
			pchan = &vchan->pchan;
			cregs->start = 1; // start flag
			tmp = vdma_acts_get_free_chan(ddev);
			if(tmp){
				vdma_vchan_start_tran(vchan, tmp); // start tansfer
			}else{
				list_add_tail(&vchan->list, &ddev->dma_req_list); //wait
			}
		}
	}else{
		cregs = DMA_CHAN(ddev->base, channel);
		pchan = &ddev->pchan[channel];
		/* clear old irq pending */
		gregs->dma_ip = DMA_PD_TCIP(channel) | DMA_PD_HFIP(channel);
		gregs->dma_ie &= ~( DMA_IE_TCIP(channel) | DMA_IE_HFIP(channel));
		/* enable dma channel full complete irq? */
		if (pchan->complete_callback_en) {
			gregs->dma_ie |= DMA_IE_TCIP(channel) ;
			/*DMA_CTL_RELOAD use half complete irq*/
			if(pchan->hcom_callback_en)
				gregs->dma_ie |= DMA_IE_HFIP(channel) ;
		}
		/* start dma transfer */
		cregs->start |= DMA_START_START;
	}
	irq_unlock(key);
	return 0;
}
/* Stop physical channel @ch: mask and clear its interrupts, then halt
 * the hardware. Write order matters: reload must be cleared before
 * the start bit so the channel cannot immediately restart itself.
 */
static void _vdma_acts_stop(struct dma_regs *cregs, struct dma_reg_g *gregs, uint32_t ch)
{
	gregs->dma_ie &= ~( DMA_IE_TCIP(ch) | DMA_IE_HFIP(ch));
	/* clear old irq pending */
	gregs->dma_ip = DMA_PD_TCIP(ch) | DMA_PD_HFIP(ch);
	/* disable reload brefore stop dma */
	cregs->ctl &= ~DMA_CTL_RELOAD;
	cregs->start &= ~DMA_START_START;
}
/* Stop @channel. A running vchan halts its bound pchan and releases it
 * to the next queued vchan; a queued-only vchan is simply removed from
 * the request list. A plain pchan is halted directly.
 */
static int vdma_acts_stop(const struct device *dev, uint32_t channel)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan = NULL;
	struct dma_pchan *pchan;
	struct dma_regs *cregs;
	struct dma_reg_g *gregs = (struct dma_reg_g *)ddev->base;
	uint32_t key;
	int ret;

	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err stop\n");
		return ret;
	}
	key = irq_lock();
	if(vchan) {
		if(vchan->dma_regs.start){
			if(vchan->pchan_id != DMA_INVALID_CHAN){// is transfering
				cregs = DMA_CHAN(ddev->base, vchan->pchan_id);
				_vdma_acts_stop(cregs, gregs, vchan->pchan_id);
				pchan = &ddev->pchan[vchan->pchan_id];
				/* may immediately re-arm the pchan for a queued vchan */
				vdma_vchan_free_pchan(ddev, pchan);
				vchan->pchan_id = DMA_INVALID_CHAN;
			}else{// del list
				list_del_init(&vchan->list);
			}
			vchan->dma_regs.start = 0;
		}
	}else{
		cregs = DMA_CHAN(ddev->base, channel);
		_vdma_acts_stop(cregs, gregs, channel);
	}
	irq_unlock(key);
	return 0;
}
/* Reprogram source, destination and length for @channel. For a vchan
 * that is mid-transfer the shadow registers are updated first and then
 * cregs is redirected at the live hardware bank, so the trailing
 * writes update both copies; otherwise only the shadow (vchan) or the
 * hardware bank (pchan) is written.
 */
static int vdma_acts_reload(const struct device *dev, uint32_t channel,
		uint32_t src, uint32_t dst, size_t size)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan = NULL;
	struct dma_regs *cregs;
	uint32_t key;
	int ret;

	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err reload\n");
		return ret;
	}
	key = irq_lock();
	if(vchan){
		cregs = &vchan->dma_regs;
		if(vchan->dma_regs.start && (vchan->pchan_id != DMA_INVALID_CHAN)){
			// is transfering
			cregs->saddr0 = src;
			cregs->daddr0 = dst;
			cregs->bc = size;
			/* fall through to also update the live hardware bank */
			cregs = DMA_CHAN(ddev->base, vchan->pchan_id);
		}
	}else{
		cregs = DMA_CHAN(ddev->base, channel);
	}
	cregs->saddr0 = src;
	cregs->daddr0 = dst;
	cregs->bc = size;
	irq_unlock(key);
	return 0;
}
/* Report busy state and remaining byte count for @channel. A running
 * vchan is read from its live hardware bank; a queued/idle vchan from
 * its shadow registers (NOTE(review): a queued-but-not-yet-running
 * vchan reports busy with the shadow rc value — confirm that is the
 * intended semantic).
 */
static int vdma_acts_get_status(const struct device *dev, uint32_t channel,
		struct dma_status *stat)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan = NULL;
	struct dma_regs *cregs;
	uint32_t key;
	int ret;

	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err status\n");
		return ret;
	}
	key = irq_lock();
	if(vchan){
		cregs = &vchan->dma_regs;
		if(vchan->dma_regs.start && (vchan->pchan_id != DMA_INVALID_CHAN)){
			cregs = DMA_CHAN(ddev->base, vchan->pchan_id);//is transfering
		}
	}else{
		cregs = DMA_CHAN(ddev->base, channel);
	}
	if (cregs->start) {
		stat->busy = true;
		stat->pending_length = cregs->rc;
	} else {
		stat->busy = false;
		stat->pending_length = 0;
	}
	irq_unlock(key);
	return 0;
}
/* Claim a channel. Passing a concrete pchan id claims that physical
 * channel; passing DMA_INVALID_CHAN allocates the first free virtual
 * channel. Returns the channel id the caller should use afterwards
 * (vchan ids are offset by DMA_VCHAN_START_ID), or -EINVAL.
 */
static int vdma_acts_request(const struct device *dev, uint32_t channel)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan;
	uint32_t key;
	int ret = -EINVAL;
	int i;

	if (channel != DMA_INVALID_CHAN) { //pchan
		/* reject out-of-range ids and pchans reserved for the vchan pool */
		if((channel >= ddev->pchan_num) || DMA_PCHAN_IS_VCHAN(channel)){
			printk("request pchan=%d err\n", channel);
			return -EINVAL;
		}
		key = irq_lock();
		if(ddev->pchan[channel].busy){
			printk("request pchan id%d already used\n", channel);
			ret = -EINVAL;
		}else{
			ret = channel;
			ddev->pchan[channel].busy = 1;
		}
		irq_unlock(key);
	} else {// vchan
		key = irq_lock();
		for(i= 0; i < DMA_VCHAN_MAX_NUM; i++){
			vchan = &dma_vchan_data[i];
			if(vchan->vchan_id != i){
				printk("err err: vchan id=%d\n", i);
			}
			if(!vchan->busy){
				vchan->busy = 1;
				vchan->pchan_id = DMA_INVALID_CHAN;
				ret = i+ DMA_VCHAN_START_ID;
				break;
			}
		}
		irq_unlock(key);
		if(ret < 0){
			printk("request vchan fail\n");
		}
	}
	return ret;
}
/* Release @channel: stop any in-flight transfer, then mark the vchan
 * or pchan as no longer requested.
 */
static void vdma_acts_free(const struct device *dev, uint32_t channel)
{
	struct vdma_acts_data *ddev = DEV_DATA(dev);
	struct dma_vchan *vchan = NULL;
	struct dma_pchan *pchan;
	int ret;
	uint32_t key;

	/* ensure the channel is halted before releasing it */
	vdma_acts_stop(dev, channel);
	ret = vdma_acts_get_vchan(ddev, channel, &vchan);
	if(ret){
		LOG_DBG("err free\n");
		return;
	}
	key = irq_lock();
	if(vchan){
		vchan->busy = 0;
		vchan->pchan_id = DMA_INVALID_CHAN;
	}else{
		pchan = &ddev->pchan[channel];
		pchan->busy = 0;
	}
	irq_unlock(key);
}
  626. #define DMA_ACTS_IRQ_CONNECT(n) \
  627. do { \
  628. IRQ_CONNECT((IRQ_ID_DMA0+n), \
  629. CONFIG_DMA_IRQ_PRI, \
  630. vdma_acts_isr, n, 0); \
  631. irq_enable((IRQ_ID_DMA0+n)); \
  632. } while (0)
  633. #define DMA_NOT_RESERVE(chan) ((CONFIG_DMA_LCD_RESEVER_CHAN!=chan) && (CONFIG_DMA_SPINAND_RESEVER_CHAN!=chan))
/* Driver init: enable/reset the DMA block, hook the per-channel irq
 * lines (skipping channels reserved for LCD/SPINAND), and initialize
 * the pchan and vchan state tables. Returns 0.
 */
static int vdma_acts_init(const struct device *dev)
{
	struct vdma_acts_data *data = DEV_DATA(dev);
	int i;
	struct dma_pchan *pchan;
	struct dma_vchan *vchan;

	data->base = DMA_REG_BASE;
	acts_clock_peripheral_enable(CLOCK_ID_DMA);
	acts_reset_peripheral(RESET_ID_DMA);
	data->pchan_num = MAX_DMA_CH;
	/* one irq line per physical channel; reserved channels are not
	 * connected here (their owners handle them)
	 */
#if MAX_DMA_CH > 0
#if DMA_NOT_RESERVE(0)
	DMA_ACTS_IRQ_CONNECT(0);
#endif
#endif
#if MAX_DMA_CH > 1
#if DMA_NOT_RESERVE(1)
	DMA_ACTS_IRQ_CONNECT(1);
#endif
#endif
#if MAX_DMA_CH > 2
#if DMA_NOT_RESERVE(2)
	DMA_ACTS_IRQ_CONNECT(2);
#endif
#endif
#if MAX_DMA_CH > 3
#if DMA_NOT_RESERVE(3)
	DMA_ACTS_IRQ_CONNECT(3);
#endif
#endif
#if MAX_DMA_CH > 4
#if DMA_NOT_RESERVE(4)
	DMA_ACTS_IRQ_CONNECT(4);
#endif
#endif
#if MAX_DMA_CH > 5
#if DMA_NOT_RESERVE(5)
	DMA_ACTS_IRQ_CONNECT(5);
#endif
#endif
#if MAX_DMA_CH > 6
#if DMA_NOT_RESERVE(6)
	DMA_ACTS_IRQ_CONNECT(6);
#endif
#endif
#if MAX_DMA_CH > 7
#if DMA_NOT_RESERVE(7)
	DMA_ACTS_IRQ_CONNECT(7);
#endif
#endif
#if MAX_DMA_CH > 8
#if DMA_NOT_RESERVE(8)
	DMA_ACTS_IRQ_CONNECT(8);
#endif
#endif
#if MAX_DMA_CH > 9
#if DMA_NOT_RESERVE(9)
	DMA_ACTS_IRQ_CONNECT(9);
#endif
#endif
	printk("pchan=%d; vchan=%d, pnum=%d,start=%d\n", data->pchan_num,
		DMA_VCHAN_MAX_NUM, DMA_VCHAN_PCHAN_NUM, DMA_VCHAN_PCHAN_START);
	//if(DMA_VCHAN_PCHAN_END > data->pchan_num){
	//printk("error vchan config\n");
	//return -1;
	//}
	INIT_LIST_HEAD(&data->dma_req_list);
	for(i= 0; i < data->pchan_num; i++){
		pchan = &data->pchan[i];
		pchan->pchan_id = i;
		pchan->vchan_id = DMA_INVALID_CHAN;
		pchan->busy = 0;
		/* channels inside the configured window belong to the vchan pool */
		if(DMA_PCHAN_IS_VCHAN(i))
			pchan->isvchan = 1;
		else
			pchan->isvchan = 0;
	}
	for(i= 0; i < DMA_VCHAN_MAX_NUM; i++){
		vchan = &dma_vchan_data[i];
		vchan->busy = 0;
		vchan->vchan_id = i;
		vchan->pchan_id = DMA_INVALID_CHAN;
		INIT_LIST_HEAD(&vchan->list);
	}
#if CONFIG_DMA_LCD_RESEVER_CHAN < MAX_DMA_CH
	data->pchan[CONFIG_DMA_LCD_RESEVER_CHAN].busy = 1; //reserve for LCD
#endif
#if CONFIG_DMA_SPINAND_RESEVER_CHAN < MAX_DMA_CH
	data->pchan[CONFIG_DMA_SPINAND_RESEVER_CHAN].busy = 1; //reserve for SPINAND
#endif
	return 0;
}
/* DMA driver API vtable wired to the handlers above. */
static const struct dma_driver_api vdma_acts_api = {
	.config = vdma_acts_config,
	.start = vdma_acts_start,
	.stop = vdma_acts_stop,
	.reload = vdma_acts_reload,
	.get_status = vdma_acts_get_status,
	.request = vdma_acts_request,
	.free = vdma_acts_free,
};
/* Single controller instance, initialized at POST_KERNEL, priority 1. */
DEVICE_DEFINE(vdma_acts_0, CONFIG_DMA_0_NAME, &vdma_acts_init, NULL,
	&vdmac_data, NULL, POST_KERNEL,
	1, &vdma_acts_api);