spi_acts.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848
  1. /*
  2. * Copyright (c) 2018 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**
  7. * @file
  8. * @brief SPI driver for Actions SoC
  9. */
  10. #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
  11. #include <logging/log.h>
  12. LOG_MODULE_REGISTER(spi_acts);
  13. #include "spi_context.h"
  14. #include <errno.h>
  15. #include <device.h>
  16. #include <drivers/spi.h>
  17. #include <drivers/dma.h>
  18. #include <soc.h>
  19. #include <board_cfg.h>
  20. /* SPI registers macros*/
  21. #define SPI_CTL_CLK_SEL_MASK (0x1 << 31)
  22. #define SPI_CTL_CLK_SEL_CPU (0x0 << 31)
  23. #define SPI_CTL_CLK_SEL_DMA (0x1 << 31)
  24. #define SPI_CTL_FIFO_WIDTH_MASK (0x1 << 30)
  25. #define SPI_CTL_FIFO_WIDTH_8BIT (0x0 << 30)
  26. #define SPI_CTL_FIFO_WIDTH_32BIT (0x1 << 30)
  27. #define SPI_CTL_MODE_MASK (3 << 28)
  28. #define SPI_CTL_MODE(x) ((x) << 28)
  29. #define SPI_CTL_MODE_CPHA (1 << 28)
  30. #define SPI_CTL_MODE_CPOL (1 << 29)
  31. //#define SPI_CTL_DUAL_QUAD_SEL_MASK (0x0 << 27)
  32. //#define SPI_CTL_DUAL_QUAD_SEL_2x_4x (0x0 << 27)
  33. //#define SPI_CTL_DUAL_QUAD_SEL_DUAL_QUAL (0x1 << 27)
  34. #define SPI_CTL_RXW_DELAY_MASK (0x1 << 26)
  35. #define SPI_CTL_RXW_DELAY_2CYCLE (0x0 << 26)
  36. #define SPI_CTL_RXW_DELAY_3CYCLE (0x1 << 26)
  37. #define SPI_CTL_DMS_MASK (0x1 << 25)
  38. #define SPI_CTL_DMS_BURST8 (0x0 << 25)
  39. #define SPI_CTL_DMS_SINGLE (0x1 << 25)
  40. #define SPI_CTL_TXCEB_MASK (0x0 << 24)
  41. #define SPI_CTL_TXCEB_NOT_CONVERT (0x0 << 24)
  42. #define SPI_CTL_TXCEB_CONVERT (0x1 << 24)
  43. #define SPI_CTL_RXCEB_MASK (0x0 << 23)
  44. #define SPI_CTL_RXCEB_NOT_CONVERT (0x0 << 23)
  45. #define SPI_CTL_RXCEB_CONVERT (0x1 << 23)
  46. #define SPI_CTL_MS_SEL_MASK (0x1 << 22)
  47. #define SPI_CTL_MS_SEL_MASTER (0x0 << 22)
  48. #define SPI_CTL_MS_SEL_SLAVE (0x1 << 22)
  49. #define SPI_CTL_SB_SEL_MASK (0x1 << 21)
  50. #define SPI_CTL_SB_SEL_MSB (0x0 << 21)
  51. #define SPI_CTL_SB_SEL_LSB (0x1 << 21)
  52. #if defined(CONFIG_SOC_SERIES_LEOPARD)
  53. #define SPI_CTL_DELAYCHAIN_MASK (0x1f << 15)
  54. #define SPI_CTL_DELAYCHAIN_SHIFT (15)
  55. #define SPI_CTL_DELAYCHAIN(x) ((x) << 15)
  56. #define SPI_STATUS_RX_HALF_FULL (1 << 13)
  57. #define SPI_STATUS_TX_HALF_FULL (1 << 12)
  58. #else
  59. #define SPI_CTL_DELAYCHAIN_MASK (0xf << 16)
  60. #define SPI_CTL_DELAYCHAIN_SHIFT (16)
  61. #define SPI_CTL_DELAYCHAIN(x) ((x) << 16)
  62. #endif
  63. #define SPI_CTL_REQ_MASK (0x0 << 15)
  64. #define SPI_CTL_REQ_DISABLE (0x0 << 15)
  65. #define SPI_CTL_REQ_ENABLE (0x1 << 15)
  66. #define SPI_CTL_QPIEN_MASK (0x0 << 14)
  67. #define SPI_CTL_QPIEN_DISABLE (0x0 << 14)
  68. #define SPI_CTL_QPIEN_ENABLE (0x1 << 14)
  69. #define SPI_CTL_TOUT_CTRL_MASK (0x0 << 12)
  70. #define SPI_CTL_TOUT_CTRL_(x) ((x) << 12)
  71. #define SPI_CTL_IO_MODE_MASK (0x0 << 10)
  72. #define SPI_CTL_IO_MODE_(x) ((x) << 10)
  73. #define SPI_CTL_TX_IRQ_EN (1 << 9)
  74. #define SPI_CTL_RX_IRQ_EN (1 << 8)
  75. #define SPI_CTL_TX_DRQ_EN (1 << 7)
  76. #define SPI_CTL_RX_DRQ_EN (1 << 6)
  77. #define SPI_CTL_TX_FIFO_EN (1 << 5)
  78. #define SPI_CTL_RX_FIFO_EN (1 << 4)
  79. #define SPI_CTL_SS (1 << 3)
  80. #define SPI_CTL_LOOP (1 << 2)
  81. #define SPI_CTL_WR_MODE_MASK (0x3 << 0)
  82. #define SPI_CTL_WR_MODE_DISABLE (0x0 << 0)
  83. #define SPI_CTL_WR_MODE_READ (0x1 << 0)
  84. #define SPI_CTL_WR_MODE_WRITE (0x2 << 0)
  85. #define SPI_CTL_WR_MODE_READ_WRITE (0x3 << 0)
  86. #define SPI_STATUS_TX_FIFO_WERR (1 << 11)
  87. #define SPI_STATUS_RX_FIFO_WERR (1 << 9)
  88. #define SPI_STATUS_RX_FIFO_RERR (1 << 8)
  89. #define SPI_STATUS_RX_FULL (1 << 7)
  90. #define SPI_STATUS_RX_EMPTY (1 << 6)
  91. #define SPI_STATUS_TX_FULL (1 << 5)
  92. #define SPI_STATUS_TX_EMPTY (1 << 4)
  93. #define SPI_STATUS_TX_IRQ_PD (1 << 3)
  94. #define SPI_STATUS_RX_IRQ_PD (1 << 2)
  95. #define SPI_STATUS_BUSY (1 << 0)
  96. #define SPI_STATUS_ERR_MASK (SPI_STATUS_RX_FIFO_RERR | \
  97. SPI_STATUS_RX_FIFO_WERR | \
  98. SPI_STATUS_TX_FIFO_WERR)
  99. #define SPI_DMA_STRANFER_MIN_LEN 8
  100. #define SPI_FIFO_LEN 64
  101. //#define CONFIG_STANDARD_SPI
/* Memory-mapped SPI controller register layout. */
struct acts_spi_controller
{
	volatile uint32_t ctrl;		/* control register */
	volatile uint32_t status;	/* status register */
	volatile uint32_t txdat;	/* TX FIFO data register */
	volatile uint32_t rxdat;	/* RX FIFO data register */
	volatile uint32_t bc;		/* transfer byte count */
} ;
/* Per-instance, read-only controller configuration. */
struct acts_spi_config {
	struct acts_spi_controller *spi;	/* MMIO register base */
	uint32_t spiclk_reg;	/* NOTE(review): initialized with SPIn_REG_BASE
				 * in SPI_ACTS_DEFINE_CONFIG, despite the
				 * "clk" name — confirm intent */
	const char *dma_dev_name;	/* DMA controller device name */
	uint8_t txdma_id;	/* DMA slot id for TX requests */
	uint8_t txdma_chan;	/* TX DMA channel to request */
	uint8_t rxdma_id;	/* DMA slot id for RX requests */
	uint8_t rxdma_chan;	/* RX DMA channel to request */
	uint8_t clock_id;	/* peripheral clock id */
	uint8_t reset_id;	/* peripheral reset id */
	uint8_t flag_use_dma:1;	/* 1: DMA transfers, 0: CPU (PIO) transfers */
#if defined(CONFIG_SOC_SERIES_LEOPARD)
	uint8_t spi_3wire:1;	/* 3-wire IO mode (applied for SPI2 only) */
	uint8_t delay_chain:6;	/* delay chain value for SPI_CTL_DELAYCHAIN() */
#else
	uint8_t delay_chain:7;	/* delay chain value for SPI_CTL_DELAYCHAIN() */
#endif
};
/* Per-instance mutable driver state. */
struct acts_spi_data {
	struct spi_context ctx;		/* Zephyr SPI context (lock, buffers, CS) */
	const struct device *dma_dev;	/* DMA controller device (DMA mode only) */
	struct k_sem dma_sync;		/* given by the DMA completion callback */
	uint8_t rxdma_chan;		/* RX DMA channel actually granted */
	uint8_t txdma_chan;		/* TX DMA channel actually granted */
};
/* Convenience accessors for the device's config and data pointers. */
#define DEV_CFG(dev) \
	((const struct acts_spi_config *const)(dev)->config)
#define DEV_DATA(dev) \
	((struct acts_spi_data *const)(dev)->data)
  139. bool spi_acts_transfer_ongoing(struct acts_spi_data *data)
  140. {
  141. return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
  142. }
  143. static void spi_acts_wait_tx_complete(struct acts_spi_controller *spi)
  144. {
  145. while (!(spi->status & SPI_STATUS_TX_EMPTY))
  146. ;
  147. /* wait until tx fifo is empty for master mode*/
  148. while (((spi->ctrl & SPI_CTL_MS_SEL_MASK) == SPI_CTL_MS_SEL_MASTER) &&
  149. spi->status & SPI_STATUS_BUSY)
  150. ;
  151. }
  152. static void spi_acts_set_clk(const struct acts_spi_config *cfg, uint32_t freq_khz)
  153. {
  154. #if 1
  155. /* setup spi1 clock to 78MHz, coreclk/2 */
  156. //sys_write32((sys_read32(CMU_SPICLK) & ~0xff00) | 0x0100, CMU_SPICLK);
  157. /* FIXME: call soc_freq_set_spi_freq()
  158. * setup spi2 clock to 396/6 =66m bps, coreclk/6
  159. */
  160. // devclk=180M, spiclk=devclk/2=90M
  161. clk_set_rate(cfg->clock_id, freq_khz*1000);
  162. k_busy_wait(100);
  163. #endif
  164. }
  165. static void dma_done_callback(const struct device *dev, void *callback_data, uint32_t ch , int type)
  166. {
  167. struct acts_spi_data *data = (struct acts_spi_data *)callback_data;
  168. //if (type != DMA_IRQ_TC)
  169. //return;
  170. LOG_DBG("spi dma transfer is done");
  171. k_sem_give(&data->dma_sync);
  172. }
  173. static int spi_acts_start_dma(const struct acts_spi_config *cfg,
  174. struct acts_spi_data *data,
  175. uint32_t dma_chan,
  176. uint8_t *buf,
  177. int32_t len,
  178. bool is_tx,
  179. dma_callback_t callback)
  180. {
  181. struct acts_spi_controller *spi = cfg->spi;
  182. struct dma_config dma_cfg = {0};
  183. struct dma_block_config dma_block_cfg = {0};
  184. if (callback) {
  185. dma_cfg.dma_callback = callback;
  186. dma_cfg.user_data = data;
  187. dma_cfg.complete_callback_en = 1;
  188. }
  189. dma_cfg.block_count = 1;
  190. dma_cfg.head_block = &dma_block_cfg;
  191. dma_block_cfg.block_size = len;
  192. if (is_tx) {
  193. dma_cfg.dma_slot = cfg->txdma_id;
  194. dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
  195. dma_block_cfg.source_address = (uint32_t)buf;
  196. dma_block_cfg.dest_address = (uint32_t)&spi->txdat;
  197. dma_cfg.dest_data_size = 1;
  198. } else {
  199. dma_cfg.dma_slot = cfg->rxdma_id;
  200. dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
  201. dma_block_cfg.source_address = (uint32_t)&spi->rxdat;
  202. dma_block_cfg.dest_address = (uint32_t)buf;
  203. dma_cfg.source_data_size = 1;
  204. }
  205. if(len < 8)//data length is too short
  206. dma_cfg.source_burst_length = 1;
  207. if (dma_config(data->dma_dev, dma_chan, &dma_cfg)) {
  208. LOG_ERR("dma%d config error", dma_chan);
  209. return -1;
  210. }
  211. if (dma_start(data->dma_dev, dma_chan)) {
  212. LOG_ERR("dma%d start error", dma_chan);
  213. return -1;
  214. }
  215. return 0;
  216. }
/* Stop the given DMA channel; cfg is unused but kept for symmetry with
 * spi_acts_start_dma().
 */
static void spi_acts_stop_dma(const struct acts_spi_config *cfg,
	struct acts_spi_data *data,
	uint32_t dma_chan)
{
	dma_stop(data->dma_dev, dma_chan);
}
  223. static int spi_acts_read_data_by_dma(const struct acts_spi_config *cfg,
  224. struct acts_spi_data *data,
  225. uint8_t *buf, int32_t len)
  226. {
  227. struct acts_spi_controller *spi = cfg->spi;
  228. int ret;
  229. spi->bc = len;
  230. spi->ctrl = (spi->ctrl & ~(SPI_CTL_WR_MODE_MASK | SPI_CTL_DMS_MASK)) |
  231. SPI_CTL_WR_MODE_READ | SPI_CTL_CLK_SEL_DMA |
  232. SPI_CTL_RX_DRQ_EN;
  233. if(len < 8)
  234. spi->ctrl |= SPI_CTL_DMS_SINGLE;
  235. ret = spi_acts_start_dma(cfg, data, data->rxdma_chan, buf, len,
  236. false, dma_done_callback);
  237. if (ret) {
  238. LOG_ERR("faield to start dma chan 0x%x\n", data->rxdma_chan);
  239. goto out;
  240. }
  241. /* wait until dma transfer is done */
  242. k_sem_take(&data->dma_sync, K_FOREVER);
  243. out:
  244. spi_acts_stop_dma(cfg, data, data->rxdma_chan);
  245. spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA | SPI_CTL_RX_DRQ_EN);
  246. return ret;
  247. }
  248. static int spi_acts_write_data_by_dma(const struct acts_spi_config *cfg,
  249. struct acts_spi_data *data,
  250. const uint8_t *buf, int32_t len)
  251. {
  252. struct acts_spi_controller *spi = cfg->spi;
  253. int ret;
  254. spi->bc = len;
  255. spi->ctrl = (spi->ctrl & ~(SPI_CTL_WR_MODE_MASK | SPI_CTL_DMS_MASK)) |
  256. SPI_CTL_WR_MODE_WRITE | SPI_CTL_CLK_SEL_DMA |
  257. SPI_CTL_TX_DRQ_EN;
  258. if(len < 8)
  259. spi->ctrl |= SPI_CTL_DMS_SINGLE;
  260. ret = spi_acts_start_dma(cfg, data, data->txdma_chan, (uint8_t *)buf, len,
  261. true, dma_done_callback);
  262. if (ret) {
  263. LOG_ERR("faield to start tx dma chan 0x%x\n", data->txdma_chan);
  264. goto out;
  265. }
  266. /* wait until dma transfer is done */
  267. k_sem_take(&data->dma_sync, K_FOREVER);
  268. /* wait until TX FIFO empty */
  269. while(!(spi->status & SPI_STATUS_TX_EMPTY));
  270. out:
  271. spi_acts_stop_dma(cfg, data, data->txdma_chan);
  272. spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA | SPI_CTL_TX_DRQ_EN);
  273. return ret;
  274. }
  275. int spi_acts_write_read_data_by_dma(const struct acts_spi_config *cfg,
  276. struct acts_spi_data *data,
  277. const uint8_t *tx_buf, uint8_t *rx_buf, int32_t len)
  278. {
  279. struct acts_spi_controller *spi = cfg->spi;
  280. int ret;
  281. spi->bc = len;
  282. spi->ctrl = (spi->ctrl & ~(SPI_CTL_WR_MODE_MASK | SPI_CTL_DMS_MASK)) |
  283. SPI_CTL_WR_MODE_READ_WRITE | SPI_CTL_CLK_SEL_DMA |
  284. SPI_CTL_TX_DRQ_EN | SPI_CTL_RX_DRQ_EN;
  285. if(len < 8)
  286. spi->ctrl |= SPI_CTL_DMS_SINGLE;
  287. ret = spi_acts_start_dma(cfg, data, data->rxdma_chan, rx_buf, len,
  288. false, dma_done_callback);
  289. if (ret) {
  290. LOG_ERR("faield to start dma rx chan 0x%x\n", data->rxdma_chan);
  291. goto out;
  292. }
  293. ret = spi_acts_start_dma(cfg, data, data->txdma_chan, (uint8_t *)tx_buf, len,
  294. true, NULL);
  295. if (ret) {
  296. LOG_ERR("faield to start dma tx chan 0x%x\n", data->txdma_chan);
  297. goto out;
  298. }
  299. /* wait until dma transfer is done */
  300. k_sem_take(&data->dma_sync, K_FOREVER);
  301. out:
  302. spi_acts_stop_dma(cfg, data, data->rxdma_chan);
  303. spi_acts_stop_dma(cfg, data, data->txdma_chan);
  304. spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA |
  305. SPI_CTL_TX_DRQ_EN | SPI_CTL_RX_DRQ_EN);
  306. return 0;
  307. }
  308. static int spi_acts_write_data_by_cpu(struct acts_spi_controller *spi,
  309. const uint8_t *wbuf, int32_t len)
  310. {
  311. int tx_len = 0;
  312. /* switch to write mode */
  313. spi->bc = len;
  314. spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_WRITE;
  315. while (tx_len < len) {
  316. if(!(spi->status & SPI_STATUS_TX_FULL)) {
  317. spi->txdat = *wbuf++;
  318. tx_len++;
  319. }
  320. }
  321. return 0;
  322. }
/*
 * PIO read: drain @len bytes from the RX FIFO into @rbuf, spinning while
 * the FIFO is empty. Always returns 0.
 * NOTE(review): no timeout — spins forever if the hardware stalls.
 */
static int spi_acts_read_data_by_cpu(struct acts_spi_controller *spi,
	uint8_t *rbuf, int32_t len)
{
	int rx_len = 0;

	/* switch to read mode (comment fixed: previously said "write") */
	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_READ;

	while (rx_len < len) {
		if(!(spi->status & SPI_STATUS_RX_EMPTY)) {
			*rbuf++ = spi->rxdat;
			rx_len++;
		}
	}

	return 0;
}
/*
 * Full-duplex PIO transfer: interleave TX fills and RX drains so that TX
 * never runs more than (SPI_FIFO_LEN - 2) bytes ahead of RX, preventing
 * RX FIFO overrun. Bails out (with an error log) if the whole transfer
 * takes longer than one second. Always returns 0, even on timeout.
 */
static int spi_acts_write_read_data_by_cpu(struct acts_spi_controller *spi,
	const uint8_t *wbuf, uint8_t *rbuf, int32_t len)
{
	int rx_len = 0, tx_len = 0;
	uint32_t cycle;

	/* program transfer length and switch to read-write mode
	 * (comment fixed: previously said "write mode")
	 */
	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_READ_WRITE;

	cycle = k_cycle_get_32();
	while (rx_len < len || tx_len < len) {
		/* fill TX, bounded by FIFO headroom relative to RX progress */
		while((tx_len < rx_len + SPI_FIFO_LEN - 2) && (tx_len < len) && !(spi->status & SPI_STATUS_TX_FULL)) {
			spi->txdat = *wbuf++;
			tx_len++;
		}
		/* drain whatever RX has accumulated */
		while((rx_len < len) && !(spi->status & SPI_STATUS_RX_EMPTY)) {
			*rbuf++ = spi->rxdat;
			rx_len++;
		}
		/* 1 s watchdog on total elapsed time */
		if(k_cyc_to_ms_ceil32(k_cycle_get_32()-cycle) > 1000){
			LOG_ERR("spi err: txlen=%d, rxlen=%d, bc=0x%x, ctl=0x%x, status=0x%x\n",tx_len, rx_len, spi->bc, spi->ctrl, spi->status);
			break;
		}
	}

	return 0;
}
  363. int spi_acts_write_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
  364. const uint8_t *tx_buf, int len)
  365. {
  366. int ret;
  367. if (cfg->flag_use_dma) {
  368. ret = spi_acts_write_data_by_dma(cfg, data, tx_buf, len);
  369. } else {
  370. ret = spi_acts_write_data_by_cpu(cfg->spi, tx_buf, len);
  371. }
  372. return ret;
  373. }
  374. int spi_acts_read_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
  375. uint8_t *rx_buf, int len)
  376. {
  377. int ret;
  378. if (cfg->flag_use_dma) {
  379. ret = spi_acts_read_data_by_dma(cfg, data,
  380. rx_buf, len);
  381. } else {
  382. ret = spi_acts_read_data_by_cpu(cfg->spi,
  383. rx_buf, len);
  384. }
  385. return ret;
  386. }
  387. int spi_acts_write_read_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
  388. uint8_t *tx_buf, uint8_t *rx_buf, int len)
  389. {
  390. int ret;
  391. if (cfg->flag_use_dma) {
  392. ret = spi_acts_write_read_data_by_dma(cfg, data, tx_buf, rx_buf, len);
  393. } else {
  394. ret = spi_acts_write_read_data_by_cpu(cfg->spi, tx_buf, rx_buf, len);
  395. }
  396. return ret;
  397. }
/*
 * Transfer the chunk currently described by the SPI context, then advance
 * the context. After the data phase, waits for TX completion, checks the
 * status error bits, disables the WR mode field and clears sticky errors
 * (error bits are write-1-to-clear here — presumably; confirm against the
 * datasheet).
 *
 * @return 0 on success, -EIO on a FIFO error, or the helper's error code.
 */
int spi_acts_transfer_data(const struct acts_spi_config *cfg, struct acts_spi_data *data)
{
	struct acts_spi_controller *spi = cfg->spi;
	struct spi_context *ctx = &data->ctx;
	int chunk_size;
	int ret = 0;

	chunk_size = spi_context_longest_current_buf(ctx);

	LOG_DBG("tx_len %d, rx_len %d, chunk_size %d",
		ctx->tx_len, ctx->rx_len, chunk_size);

	spi->ctrl |= SPI_CTL_RX_FIFO_EN | SPI_CTL_TX_FIFO_EN;

	if (ctx->tx_len && ctx->rx_len) {
#ifdef CONFIG_STANDARD_SPI
		/* standard SPI: single full-duplex chunk */
		if(ctx->tx_buf && ctx->rx_buf){
			ret = spi_acts_write_read_data(cfg, data, (uint8_t*)ctx->tx_buf, ctx->rx_buf, chunk_size);
		}else if(ctx->tx_buf) {
			ret = spi_acts_write_data(cfg, data, (uint8_t*)ctx->tx_buf, chunk_size);
		}else{
			ret = spi_acts_read_data(cfg, data, ctx->rx_buf, chunk_size);
		}
		spi_context_update_tx(ctx, 1, chunk_size);
		spi_context_update_rx(ctx, 1, chunk_size);
#else
		/* half-duplex: write phase then read phase.
		 * NOTE(review): a write error here is overwritten by the read
		 * result below — confirm this is intended.
		 */
		if(ctx->tx_buf != NULL) {
			ret = spi_acts_write_data(cfg, data, ctx->tx_buf, ctx->tx_len);
		}
		if (ctx->rx_buf != NULL) {
			ret = spi_acts_read_data(cfg, data, ctx->rx_buf, ctx->rx_len);
		}
		spi_context_update_tx(ctx, 1, ctx->tx_len);
		spi_context_update_rx(ctx, 1, ctx->rx_len);
#endif
	} else if (ctx->tx_len) {
		/* TX-only chunk */
		ret = spi_acts_write_data(cfg, data, (uint8_t*)ctx->tx_buf, chunk_size);
		spi_context_update_tx(ctx, 1, chunk_size);
	} else {
		/* RX-only chunk */
		ret = spi_acts_read_data(cfg, data, ctx->rx_buf, chunk_size);
		spi_context_update_rx(ctx, 1, chunk_size);
	}

	if (!ret) {
		spi_acts_wait_tx_complete(spi);

		if (spi->status & SPI_STATUS_ERR_MASK) {
			ret = -EIO;
		}
	}

	if (ret) {
		LOG_ERR("spi(%p) transfer error: ctrl: 0x%x, status: 0x%x",
			spi, spi->ctrl, spi->status);
	}

	/* back to idle; clear sticky error flags */
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_DISABLE;
	spi->status |= SPI_STATUS_ERR_MASK;

	return ret;
}
/*
 * Apply a spi_config to the controller: module clock, master/slave, FIFO
 * width, CPOL/CPHA, loopback, bit order and delay chain. Skips all work if
 * the context is already configured with the same spi_config.
 *
 * @return 0 on success, -EINVAL for unsupported dual/quad line modes.
 */
int spi_acts_configure(const struct acts_spi_config *cfg,
	struct acts_spi_data *spi,
	const struct spi_config *config)
{
	uint32_t ctrl, word_size;
	uint32_t op = config->operation;

	LOG_DBG("%p (prev %p): op 0x%x", config, spi->ctx.config, op);

	/* start from the board-provided delay chain value */
	ctrl = SPI_CTL_DELAYCHAIN(cfg->delay_chain);

	if (spi_context_configured(&spi->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	spi_acts_set_clk(cfg, config->frequency / 1000);

#if defined(CONFIG_SOC_SERIES_LEOPARD)
	/* 3-wire IO mode is only applied for SPI2 */
	if (cfg->clock_id == CLOCK_ID_SPI2)
		ctrl |= SPI_CTL_IO_MODE_(cfg->spi_3wire);
#endif

	/* dual/quad line modes are not supported by this driver */
	if (op & (SPI_LINES_DUAL | SPI_LINES_QUAD))
		return -EINVAL;

	if (SPI_OP_MODE_SLAVE == SPI_OP_MODE_GET(op))
		ctrl |= SPI_CTL_MS_SEL_SLAVE;

	/* hardware FIFO supports 8-bit and 32-bit words; default to 8-bit */
	word_size = SPI_WORD_SIZE_GET(op);
	if (word_size == 8)
		ctrl |= SPI_CTL_FIFO_WIDTH_8BIT;
	else if (word_size == 32)
		ctrl |= SPI_CTL_FIFO_WIDTH_32BIT;
	else
		ctrl |= SPI_CTL_FIFO_WIDTH_8BIT;

	if (op & SPI_MODE_CPOL)
		ctrl |= SPI_CTL_MODE_CPOL;

	if (op & SPI_MODE_CPHA)
		ctrl |= SPI_CTL_MODE_CPHA;

	if (op & SPI_MODE_LOOP)
		ctrl |= SPI_CTL_LOOP;

	if (op & SPI_TRANSFER_LSB)
		ctrl |= SPI_CTL_SB_SEL_LSB;

	/* REQ is only enabled on SPI1 — presumably a HW handshake; confirm */
	if (cfg->clock_id == CLOCK_ID_SPI1)
		ctrl |= SPI_CTL_REQ_ENABLE;

	cfg->spi->ctrl = ctrl;

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;
	spi_context_cs_configure(&spi->ctx);

	return 0;
}
  494. int transceive(const struct device *dev,
  495. const struct spi_config *config,
  496. const struct spi_buf_set *tx_bufs,
  497. const struct spi_buf_set *rx_bufs,
  498. bool asynchronous,
  499. struct k_poll_signal *signal)
  500. {
  501. const struct acts_spi_config *cfg =DEV_CFG(dev);;
  502. struct acts_spi_data *data = DEV_DATA(dev);;
  503. struct acts_spi_controller *spi = cfg->spi;
  504. int ret;
  505. if ((tx_bufs && !tx_bufs->count) && (rx_bufs && !rx_bufs->count)){
  506. return 0;
  507. }
  508. spi_context_lock(&data->ctx, asynchronous, signal);
  509. /* Configure */
  510. ret = spi_acts_configure(cfg, data, config);
  511. if (ret) {
  512. goto out;
  513. }
  514. /* Set buffers info */
  515. spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
  516. /* assert chip select */
  517. if (SPI_OP_MODE_MASTER == SPI_OP_MODE_GET(config->operation)) {
  518. if (data->ctx.config->cs) {
  519. spi_context_cs_control(&data->ctx, true);
  520. } else {
  521. spi->ctrl &= ~SPI_CTL_SS;
  522. }
  523. }
  524. do {
  525. ret = spi_acts_transfer_data(cfg, data);
  526. } while (!ret && spi_acts_transfer_ongoing(data));
  527. /* deassert chip select */
  528. if (SPI_OP_MODE_MASTER == SPI_OP_MODE_GET(config->operation)) {
  529. if (data->ctx.config->cs) {
  530. spi_context_cs_control(&data->ctx, false);
  531. } else {
  532. spi->ctrl |= SPI_CTL_SS;
  533. }
  534. }
  535. out:
  536. spi_context_release(&data->ctx, ret);
  537. return ret;
  538. }
/* Synchronous spi_driver_api transceive entry point. */
static int spi_acts_transceive(const struct device *dev,
	const struct spi_config *config,
	const struct spi_buf_set *tx_bufs,
	const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL);
}
#ifdef CONFIG_SPI_ASYNC
/* Asynchronous spi_driver_api transceive entry point. */
static int spi_acts_transceive_async(const struct device *dev,
	const struct spi_config *config,
	const struct spi_buf_set *tx_bufs,
	const struct spi_buf_set *rx_bufs,
	struct k_poll_signal *async)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, async);
}
#endif /* CONFIG_SPI_ASYNC */
  556. static int spi_acts_release(const struct device *dev,
  557. const struct spi_config *config)
  558. {
  559. struct acts_spi_data *data = dev->data;
  560. spi_context_unlock_unconditionally(&data->ctx);
  561. return 0;
  562. }
  563. #define SPI_TEST
  564. #ifdef SPI_TEST
  565. static int spi_test(const struct device *dev)
  566. {
  567. struct spi_config config = {
  568. .frequency = 2500000,
  569. .operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
  570. .slave = 0,
  571. .cs = NULL,
  572. };
  573. struct spi_buf_set tx_bufs;
  574. struct spi_buf_set rx_bufs;
  575. struct spi_buf tx_buf[1];
  576. struct spi_buf rx_buf[1];
  577. u8_t buf_tx[16];
  578. u8_t buf_rx[48] = {0};
  579. int ret;
  580. memset(buf_tx, 0x86, 16);
  581. printk("spi test:%s\n", dev->name);
  582. tx_buf[0].buf = buf_tx;
  583. tx_buf[0].len = 1;
  584. rx_buf[0].buf = buf_rx;
  585. rx_buf[0].len = 1;
  586. rx_bufs.buffers = rx_buf;
  587. rx_bufs.count = 1;
  588. tx_bufs.buffers = tx_buf;
  589. tx_bufs.count = 1;
  590. ret = spi_transceive(dev, &config, &tx_bufs, &rx_bufs);
  591. if(ret)
  592. printk("spi test error\n");
  593. else
  594. printk("spi test pass\n");
  595. printk("buf_rx : 0x%x 0x%x 0x%x 0x%x\n", buf_rx[0], buf_rx[1], buf_rx[14], buf_rx[15]);
  596. return 0;
  597. }
  598. #endif
  599. int spi_acts_init(const struct device *dev)
  600. {
  601. const struct acts_spi_config *config = DEV_CFG(dev);
  602. struct acts_spi_data *data = DEV_DATA(dev);
  603. int chan;
  604. k_sem_init(&data->dma_sync, 0, 1);
  605. if (config->flag_use_dma) {
  606. data->dma_dev = device_get_binding(config->dma_dev_name);
  607. if (!data->dma_dev){
  608. LOG_ERR("dma-dev binding err:%s\n", config->dma_dev_name);
  609. return -ENODEV;
  610. }
  611. chan = dma_request(data->dma_dev, config->txdma_chan);
  612. if(chan < 0){
  613. LOG_ERR("dma-dev txchan config err chan=%d\n", config->txdma_chan);
  614. return -ENODEV;
  615. }
  616. data->txdma_chan = chan;
  617. chan = dma_request(data->dma_dev, config->rxdma_chan);
  618. if(chan < 0){
  619. LOG_ERR("dma-dev rxchan config err chan=%d\n", config->txdma_chan);
  620. return -ENODEV;
  621. }
  622. data->rxdma_chan = chan;
  623. }
  624. printk("spi:clkreg=0x%x, dma=%d \n",config->spiclk_reg, config->flag_use_dma);
  625. /* enable spi controller clock */
  626. acts_clock_peripheral_enable(config->clock_id);
  627. /* reset spi controller */
  628. acts_reset_peripheral(config->reset_id);
  629. spi_context_unlock_unconditionally(&data->ctx);
  630. #ifdef SPI_TEST
  631. spi_test(dev);
  632. #endif
  633. return 0;
  634. }
/* spi_driver_api hooks registered via DEVICE_DEFINE below. */
const struct spi_driver_api spi_acts_driver_api = {
	.transceive = spi_acts_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_acts_transceive_async,
#endif
	.release = spi_acts_release,
};
/*
 * Optional DMA members of acts_spi_config. The surrounding parentheses let
 * the comma-separated initializers pass through COND_CODE_1() as a single
 * macro argument; COND_CODE_1 strips them on expansion.
 */
#define dma_use(n) (\
	.dma_dev_name = CONFIG_DMA_0_NAME, \
	.txdma_id = CONFIG_SPI_##n##_DMA_ID,\
	.txdma_chan = CONFIG_SPI_##n##_TXDMA_CHAN,\
	.rxdma_id = CONFIG_SPI_##n##_DMA_ID,\
	.rxdma_chan = CONFIG_SPI_##n##_RXDMA_CHAN,\
	.flag_use_dma = 1, \
	)
#define dma_not(n) (\
	.flag_use_dma = 0, \
	)
/* Build the ROM config for controller instance n (LEOPARD adds the
 * spi_3wire field).
 */
#if defined(CONFIG_SOC_SERIES_LEOPARD)
#define SPI_ACTS_DEFINE_CONFIG(n) \
	static const struct acts_spi_config spi_acts_config_##n = { \
		.spi = (struct acts_spi_controller *)SPI##n##_REG_BASE,\
		.spiclk_reg = SPI##n##_REG_BASE,\
		.clock_id = CLOCK_ID_SPI##n,\
		.reset_id = RESET_ID_SPI##n,\
		.spi_3wire = 0,\
		COND_CODE_1(CONFIG_SPI_##n##_USE_DMA,dma_use(n), dma_not(n))\
	}
#else
#define SPI_ACTS_DEFINE_CONFIG(n) \
	static const struct acts_spi_config spi_acts_config_##n = { \
		.spi = (struct acts_spi_controller *)SPI##n##_REG_BASE,\
		.spiclk_reg = SPI##n##_REG_BASE,\
		.clock_id = CLOCK_ID_SPI##n,\
		.reset_id = RESET_ID_SPI##n,\
		COND_CODE_1(CONFIG_SPI_##n##_USE_DMA,dma_use(n), dma_not(n))\
	}
#endif
/*
 * Declare the device object, its ROM config and RAM data, and register the
 * instance at POST_KERNEL / CONFIG_SPI_INIT_PRIORITY.
 */
#define SPI_ACTS_DEVICE_INIT(n) \
	static const struct device DEVICE_NAME_GET(spi_acts_##n); \
	SPI_ACTS_DEFINE_CONFIG(n); \
	static struct acts_spi_data spi_acts_dev_data_##n = { \
		SPI_CONTEXT_INIT_LOCK(spi_acts_dev_data_##n, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_acts_dev_data_##n, ctx), \
	}; \
	DEVICE_DEFINE(spi_acts_##n, \
		CONFIG_SPI_##n##_NAME, \
		&spi_acts_init, NULL, &spi_acts_dev_data_##n, \
		&spi_acts_config_##n, POST_KERNEL, \
		CONFIG_SPI_INIT_PRIORITY, &spi_acts_driver_api);
/* Instantiate one driver instance per enabled controller. */
#if IS_ENABLED(CONFIG_SPI_1)
SPI_ACTS_DEVICE_INIT(1)
#endif
#if IS_ENABLED(CONFIG_SPI_2)
SPI_ACTS_DEVICE_INIT(2)
#endif
#if IS_ENABLED(CONFIG_SPI_3)
SPI_ACTS_DEVICE_INIT(3)
#endif