spi_acts.c

/*
 * Copyright (c) 2018 Actions Semiconductor Co., Ltd
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief SPI driver for Actions SoC
 */

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(spi_acts);

#include "spi_context.h"
#include <errno.h>
#include <device.h>
#include <drivers/spi.h>
#include <drivers/dma.h>
#include <soc.h>
/* SPI register macros */
#define SPI_CTL_CLK_SEL_MASK		(0x1 << 31)
#define SPI_CTL_CLK_SEL_CPU		(0x0 << 31)
#define SPI_CTL_CLK_SEL_DMA		(0x1 << 31)
#define SPI_CTL_FIFO_WIDTH_MASK		(0x1 << 30)
#define SPI_CTL_FIFO_WIDTH_8BIT		(0x0 << 30)
#define SPI_CTL_FIFO_WIDTH_32BIT	(0x1 << 30)
#define SPI_CTL_MODE_MASK		(3 << 28)
#define SPI_CTL_MODE(x)			((x) << 28)
#define SPI_CTL_MODE_CPHA		(1 << 28)
#define SPI_CTL_MODE_CPOL		(1 << 29)
/* #define SPI_CTL_DUAL_QUAD_SEL_MASK		(0x1 << 27) */
/* #define SPI_CTL_DUAL_QUAD_SEL_2x_4x		(0x0 << 27) */
/* #define SPI_CTL_DUAL_QUAD_SEL_DUAL_QUAL	(0x1 << 27) */
#define SPI_CTL_RXW_DELAY_MASK		(0x1 << 26)
#define SPI_CTL_RXW_DELAY_2CYCLE	(0x0 << 26)
#define SPI_CTL_RXW_DELAY_3CYCLE	(0x1 << 26)
#define SPI_CTL_DMS_MASK		(0x1 << 25)
#define SPI_CTL_DMS_BURST8		(0x0 << 25)
#define SPI_CTL_DMS_SINGLE		(0x1 << 25)
#define SPI_CTL_TXCEB_MASK		(0x1 << 24)
#define SPI_CTL_TXCEB_NOT_CONVERT	(0x0 << 24)
#define SPI_CTL_TXCEB_CONVERT		(0x1 << 24)
#define SPI_CTL_RXCEB_MASK		(0x1 << 23)
#define SPI_CTL_RXCEB_NOT_CONVERT	(0x0 << 23)
#define SPI_CTL_RXCEB_CONVERT		(0x1 << 23)
#define SPI_CTL_MS_SEL_MASK		(0x1 << 22)
#define SPI_CTL_MS_SEL_MASTER		(0x0 << 22)
#define SPI_CTL_MS_SEL_SLAVE		(0x1 << 22)
#define SPI_CTL_SB_SEL_MASK		(0x1 << 21)
#define SPI_CTL_SB_SEL_MSB		(0x0 << 21)
#define SPI_CTL_SB_SEL_LSB		(0x1 << 21)
#define SPI_CTL_DELAYCHAIN_MASK		(0xf << 16)
#define SPI_CTL_DELAYCHAIN_SHIFT	(16)
#define SPI_CTL_DELAYCHAIN(x)		((x) << 16)
#define SPI_CTL_REQ_MASK		(0x1 << 15)
#define SPI_CTL_REQ_DISABLE		(0x0 << 15)
#define SPI_CTL_REQ_ENABLE		(0x1 << 15)
#define SPI_CTL_QPIEN_MASK		(0x1 << 14)
#define SPI_CTL_QPIEN_DISABLE		(0x0 << 14)
#define SPI_CTL_QPIEN_ENABLE		(0x1 << 14)
/* widths of the two fields below inferred from the neighbouring bit positions */
#define SPI_CTL_TOUT_CTRL_MASK		(0x3 << 12)
#define SPI_CTL_TOUT_CTRL_(x)		((x) << 12)
#define SPI_CTL_IO_MODE_MASK		(0x3 << 10)
#define SPI_CTL_IO_MODE_(x)		((x) << 10)
#define SPI_CTL_TX_IRQ_EN		(1 << 9)
#define SPI_CTL_RX_IRQ_EN		(1 << 8)
#define SPI_CTL_TX_DRQ_EN		(1 << 7)
#define SPI_CTL_RX_DRQ_EN		(1 << 6)
#define SPI_CTL_TX_FIFO_EN		(1 << 5)
#define SPI_CTL_RX_FIFO_EN		(1 << 4)
#define SPI_CTL_SS			(1 << 3)
#define SPI_CTL_LOOP			(1 << 2)
#define SPI_CTL_WR_MODE_MASK		(0x3 << 0)
#define SPI_CTL_WR_MODE_DISABLE		(0x0 << 0)
#define SPI_CTL_WR_MODE_READ		(0x1 << 0)
#define SPI_CTL_WR_MODE_WRITE		(0x2 << 0)
#define SPI_CTL_WR_MODE_READ_WRITE	(0x3 << 0)

#define SPI_STATUS_TX_FIFO_WERR		(1 << 11)
#define SPI_STATUS_RX_FIFO_WERR		(1 << 9)
#define SPI_STATUS_RX_FIFO_RERR		(1 << 8)
#define SPI_STATUS_RX_FULL		(1 << 7)
#define SPI_STATUS_RX_EMPTY		(1 << 6)
#define SPI_STATUS_TX_FULL		(1 << 5)
#define SPI_STATUS_TX_EMPTY		(1 << 4)
#define SPI_STATUS_TX_IRQ_PD		(1 << 3)
#define SPI_STATUS_RX_IRQ_PD		(1 << 2)
#define SPI_STATUS_BUSY			(1 << 0)
#define SPI_STATUS_ERR_MASK		(SPI_STATUS_RX_FIFO_RERR |	\
					 SPI_STATUS_RX_FIFO_WERR |	\
					 SPI_STATUS_TX_FIFO_WERR)
/* below this length, DMA transfers fall back to single-unit bursts */
#define SPI_DMA_TRANSFER_MIN_LEN	8
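
/*
 * Memory-mapped register block of the Actions SPI controller: control,
 * status, the TX/RX data FIFO ports, and the transfer byte counter (bc).
 */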
struct acts_spi_controller {
	volatile uint32_t ctrl;
	volatile uint32_t status;
	volatile uint32_t txdat;
	volatile uint32_t rxdat;
	volatile uint32_t bc;
};

struct acts_spi_config {
	struct acts_spi_controller *spi;
	uint32_t spiclk_reg;
	const char *dma_dev_name;
	uint8_t txdma_id;
	uint8_t txdma_chan;
	uint8_t rxdma_id;
	uint8_t rxdma_chan;
	uint8_t clock_id;
	uint8_t reset_id;
	uint8_t flag_use_dma:1;
	uint8_t delay_chain:7;
};

struct acts_spi_data {
	struct spi_context ctx;
	const struct device *dma_dev;
	struct k_sem dma_sync;
	uint8_t rxdma_chan;
	uint8_t txdma_chan;
};
#define DEV_CFG(dev) \
	((const struct acts_spi_config *const)(dev)->config)
#define DEV_DATA(dev) \
	((struct acts_spi_data *const)(dev)->data)

bool spi_acts_transfer_ongoing(struct acts_spi_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}
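
/*
 * Busy-wait until the TX FIFO drains and, in master mode, until the
 * controller deasserts its busy flag. Note there is no timeout here, so
 * a stalled bus will hang the caller.
 */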
static void spi_acts_wait_tx_complete(struct acts_spi_controller *spi)
{
	/* wait until the tx fifo is empty */
	while (!(spi->status & SPI_STATUS_TX_EMPTY))
		;

	/* in master mode, also wait until the controller is no longer busy */
	while (((spi->ctrl & SPI_CTL_MS_SEL_MASK) == SPI_CTL_MS_SEL_MASTER) &&
	       spi->status & SPI_STATUS_BUSY)
		;
}
static void spi_acts_set_clk(const struct acts_spi_config *cfg, uint32_t freq_khz)
{
	/* FIXME: should go through soc_freq_set_spi_freq() so the SPI clock
	 * is derived from the device clock (e.g. devclk/2 or devclk/6).
	 */
	clk_set_rate(cfg->clock_id, freq_khz);
	k_busy_wait(100);
}
static void dma_done_callback(const struct device *dev, void *callback_data,
			      uint32_t ch, int type)
{
	struct acts_spi_data *data = (struct acts_spi_data *)callback_data;

	/* TODO: filter on transfer-complete (DMA_IRQ_TC) if other event
	 * types are ever reported on this callback.
	 */
	LOG_DBG("spi dma transfer is done");
	k_sem_give(&data->dma_sync);
}
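
/*
 * Configure and start one DMA channel for a byte-wide transfer between
 * memory and the SPI data FIFO. The completion callback is optional: for
 * duplex transfers only the RX channel installs one, and the TX channel
 * runs without notification.
 */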
static int spi_acts_start_dma(const struct acts_spi_config *cfg,
			      struct acts_spi_data *data,
			      uint32_t dma_chan,
			      uint8_t *buf,
			      int32_t len,
			      bool is_tx,
			      dma_callback_t callback)
{
	struct acts_spi_controller *spi = cfg->spi;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_block_cfg = {0};

	if (callback) {
		dma_cfg.dma_callback = callback;
		dma_cfg.user_data = data;
		dma_cfg.complete_callback_en = 1;
	}

	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_block_cfg;
	dma_block_cfg.block_size = len;

	if (is_tx) {
		dma_cfg.dma_slot = cfg->txdma_id;
		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_block_cfg.source_address = (uint32_t)buf;
		dma_block_cfg.dest_address = (uint32_t)&spi->txdat;
		dma_cfg.dest_data_size = 1;
	} else {
		dma_cfg.dma_slot = cfg->rxdma_id;
		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_block_cfg.source_address = (uint32_t)&spi->rxdat;
		dma_block_cfg.dest_address = (uint32_t)buf;
		dma_cfg.source_data_size = 1;
	}

	/* single-unit bursts also work for short transfers (len < 8) */
	dma_cfg.source_burst_length = 1;

	if (dma_config(data->dma_dev, dma_chan, &dma_cfg)) {
		LOG_ERR("dma%d config error", dma_chan);
		return -1;
	}

	if (dma_start(data->dma_dev, dma_chan)) {
		LOG_ERR("dma%d start error", dma_chan);
		return -1;
	}

	return 0;
}
static void spi_acts_stop_dma(const struct acts_spi_config *cfg,
			      struct acts_spi_data *data,
			      uint32_t dma_chan)
{
	dma_stop(data->dma_dev, dma_chan);
}
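
/*
 * DMA transfer helpers: program the byte count, switch the controller to
 * the requested read/write mode with its DRQ lines enabled, kick the DMA
 * channel(s), then block on dma_sync until dma_done_callback fires.
 */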
static int spi_acts_read_data_by_dma(const struct acts_spi_config *cfg,
				     struct acts_spi_data *data,
				     uint8_t *buf, int32_t len)
{
	struct acts_spi_controller *spi = cfg->spi;
	int ret;

	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) |
		    SPI_CTL_WR_MODE_READ | SPI_CTL_CLK_SEL_DMA |
		    SPI_CTL_RX_DRQ_EN;
	if (len < SPI_DMA_TRANSFER_MIN_LEN)
		spi->ctrl |= SPI_CTL_DMS_SINGLE;

	ret = spi_acts_start_dma(cfg, data, data->rxdma_chan, buf, len,
				 false, dma_done_callback);
	if (ret) {
		LOG_ERR("failed to start dma chan 0x%x", data->rxdma_chan);
		goto out;
	}

	/* wait until dma transfer is done */
	k_sem_take(&data->dma_sync, K_FOREVER);

out:
	spi_acts_stop_dma(cfg, data, data->rxdma_chan);
	spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA | SPI_CTL_RX_DRQ_EN);

	return ret;
}
static int spi_acts_write_data_by_dma(const struct acts_spi_config *cfg,
				      struct acts_spi_data *data,
				      const uint8_t *buf, int32_t len)
{
	struct acts_spi_controller *spi = cfg->spi;
	int ret;

	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) |
		    SPI_CTL_WR_MODE_WRITE | SPI_CTL_CLK_SEL_DMA |
		    SPI_CTL_TX_DRQ_EN;
	if (len < SPI_DMA_TRANSFER_MIN_LEN)
		spi->ctrl |= SPI_CTL_DMS_SINGLE;

	ret = spi_acts_start_dma(cfg, data, data->txdma_chan, (uint8_t *)buf, len,
				 true, dma_done_callback);
	if (ret) {
		LOG_ERR("failed to start tx dma chan 0x%x", data->txdma_chan);
		goto out;
	}

	/* wait until dma transfer is done */
	k_sem_take(&data->dma_sync, K_FOREVER);

out:
	spi_acts_stop_dma(cfg, data, data->txdma_chan);
	spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA | SPI_CTL_TX_DRQ_EN);

	return ret;
}
int spi_acts_write_read_data_by_dma(const struct acts_spi_config *cfg,
				    struct acts_spi_data *data,
				    const uint8_t *tx_buf, uint8_t *rx_buf, int32_t len)
{
	struct acts_spi_controller *spi = cfg->spi;
	int ret;

	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) |
		    SPI_CTL_WR_MODE_READ_WRITE | SPI_CTL_CLK_SEL_DMA |
		    SPI_CTL_TX_DRQ_EN | SPI_CTL_RX_DRQ_EN;

	ret = spi_acts_start_dma(cfg, data, data->rxdma_chan, rx_buf, len,
				 false, dma_done_callback);
	if (ret) {
		LOG_ERR("failed to start dma rx chan 0x%x", data->rxdma_chan);
		goto out;
	}

	ret = spi_acts_start_dma(cfg, data, data->txdma_chan, (uint8_t *)tx_buf, len,
				 true, NULL);
	if (ret) {
		LOG_ERR("failed to start dma tx chan 0x%x", data->txdma_chan);
		goto out;
	}

	/* wait until dma transfer is done */
	k_sem_take(&data->dma_sync, K_FOREVER);

out:
	spi_acts_stop_dma(cfg, data, data->rxdma_chan);
	spi_acts_stop_dma(cfg, data, data->txdma_chan);

	spi->ctrl = spi->ctrl & ~(SPI_CTL_CLK_SEL_DMA |
		    SPI_CTL_TX_DRQ_EN | SPI_CTL_RX_DRQ_EN);

	return ret;
}
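
/*
 * CPU-polled (PIO) transfer paths, used when the instance is configured
 * without DMA. These spin on the FIFO full/empty status bits, so they are
 * only sensible for short transfers.
 */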
static int spi_acts_write_data_by_cpu(struct acts_spi_controller *spi,
				      const uint8_t *wbuf, int32_t len)
{
	int tx_len = 0;

	/* switch to write mode */
	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_WRITE;

	while (tx_len < len) {
		if (!(spi->status & SPI_STATUS_TX_FULL)) {
			spi->txdat = *wbuf++;
			tx_len++;
		}
	}

	return 0;
}
static int spi_acts_read_data_by_cpu(struct acts_spi_controller *spi,
				     uint8_t *rbuf, int32_t len)
{
	int rx_len = 0;

	/* switch to read mode */
	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_READ;

	while (rx_len < len) {
		if (!(spi->status & SPI_STATUS_RX_EMPTY)) {
			*rbuf++ = spi->rxdat;
			rx_len++;
		}
	}

	return 0;
}
static int spi_acts_write_read_data_by_cpu(struct acts_spi_controller *spi,
					   const uint8_t *wbuf, uint8_t *rbuf, int32_t len)
{
	int rx_len = 0, tx_len = 0;

	/* switch to read/write mode */
	spi->bc = len;
	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_READ_WRITE;

	while (rx_len < len || tx_len < len) {
		while ((tx_len < len) && !(spi->status & SPI_STATUS_TX_FULL)) {
			spi->txdat = *wbuf++;
			tx_len++;
		}
		while ((rx_len < len) && !(spi->status & SPI_STATUS_RX_EMPTY)) {
			*rbuf++ = spi->rxdat;
			rx_len++;
		}
	}

	return 0;
}
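
/* Dispatchers: pick the DMA or CPU-polled path based on flag_use_dma. */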
int spi_acts_write_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
			const uint8_t *tx_buf, int len)
{
	int ret;

	if (cfg->flag_use_dma) {
		ret = spi_acts_write_data_by_dma(cfg, data, tx_buf, len);
	} else {
		ret = spi_acts_write_data_by_cpu(cfg->spi, tx_buf, len);
	}

	return ret;
}

int spi_acts_read_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
		       uint8_t *rx_buf, int len)
{
	int ret;

	if (cfg->flag_use_dma) {
		ret = spi_acts_read_data_by_dma(cfg, data, rx_buf, len);
	} else {
		ret = spi_acts_read_data_by_cpu(cfg->spi, rx_buf, len);
	}

	return ret;
}

int spi_acts_write_read_data(const struct acts_spi_config *cfg, struct acts_spi_data *data,
			     uint8_t *tx_buf, uint8_t *rx_buf, int len)
{
	int ret;

	if (cfg->flag_use_dma) {
		ret = spi_acts_write_read_data_by_dma(cfg, data, tx_buf, rx_buf, len);
	} else {
		ret = spi_acts_write_read_data_by_cpu(cfg->spi, tx_buf, rx_buf, len);
	}

	return ret;
}
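
/*
 * Transfer one chunk of the current spi_context buffers. When both TX and
 * RX are pending, the write is issued before the read (half duplex); the
 * full-duplex spi_acts_write_read_data() helper is not used here.
 */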
int spi_acts_transfer_data(const struct acts_spi_config *cfg, struct acts_spi_data *data)
{
	struct acts_spi_controller *spi = cfg->spi;
	struct spi_context *ctx = &data->ctx;
	int chunk_size;
	int ret = 0;

	chunk_size = spi_context_longest_current_buf(ctx);

	LOG_DBG("tx_len %d, rx_len %d, chunk_size %d",
		ctx->tx_len, ctx->rx_len, chunk_size);

	spi->ctrl |= SPI_CTL_RX_FIFO_EN | SPI_CTL_TX_FIFO_EN;

	if (ctx->tx_len && ctx->rx_len) {
		if (ctx->tx_buf != NULL) {
			ret = spi_acts_write_data(cfg, data, ctx->tx_buf, ctx->tx_len);
		}
		if (ctx->rx_buf != NULL) {
			ret = spi_acts_read_data(cfg, data, ctx->rx_buf, ctx->rx_len);
		}
		spi_context_update_tx(ctx, 1, ctx->tx_len);
		spi_context_update_rx(ctx, 1, ctx->rx_len);
	} else if (ctx->tx_len) {
		ret = spi_acts_write_data(cfg, data, (uint8_t *)ctx->tx_buf, chunk_size);
		spi_context_update_tx(ctx, 1, chunk_size);
	} else {
		ret = spi_acts_read_data(cfg, data, ctx->rx_buf, chunk_size);
		spi_context_update_rx(ctx, 1, chunk_size);
	}

	if (!ret) {
		spi_acts_wait_tx_complete(spi);

		if (spi->status & SPI_STATUS_ERR_MASK) {
			ret = -EIO;
		}
	}

	if (ret) {
		LOG_ERR("spi(%p) transfer error: ctrl: 0x%x, status: 0x%x",
			spi, spi->ctrl, spi->status);
	}

	spi->ctrl = (spi->ctrl & ~SPI_CTL_WR_MODE_MASK) | SPI_CTL_WR_MODE_DISABLE;
	/* clear any pending error bits (status bits appear to be write-one-to-clear) */
	spi->status |= SPI_STATUS_ERR_MASK;

	return ret;
}
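
/*
 * Apply a struct spi_config to the controller: clock rate, master/slave
 * role, FIFO width, CPOL/CPHA, loopback, and bit order. Dual/quad line
 * modes are rejected. Skipped entirely when the context already holds
 * this configuration.
 */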
int spi_acts_configure(const struct acts_spi_config *cfg,
		       struct acts_spi_data *spi,
		       const struct spi_config *config)
{
	uint32_t ctrl, word_size;
	uint32_t op = config->operation;

	LOG_DBG("%p (prev %p): op 0x%x", config, spi->ctx.config, op);

	ctrl = SPI_CTL_DELAYCHAIN(cfg->delay_chain);

	if (spi_context_configured(&spi->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	spi_acts_set_clk(cfg, config->frequency / 1000);

	if (op & (SPI_LINES_DUAL | SPI_LINES_QUAD))
		return -EINVAL;

	if (SPI_OP_MODE_SLAVE == SPI_OP_MODE_GET(op))
		ctrl |= SPI_CTL_MS_SEL_SLAVE;

	word_size = SPI_WORD_SIZE_GET(op);
	if (word_size == 8)
		ctrl |= SPI_CTL_FIFO_WIDTH_8BIT;
	else if (word_size == 32)
		ctrl |= SPI_CTL_FIFO_WIDTH_32BIT;
	else
		ctrl |= SPI_CTL_FIFO_WIDTH_8BIT; /* default to the 8-bit fifo */

	if (op & SPI_MODE_CPOL)
		ctrl |= SPI_CTL_MODE_CPOL;

	if (op & SPI_MODE_CPHA)
		ctrl |= SPI_CTL_MODE_CPHA;

	if (op & SPI_MODE_LOOP)
		ctrl |= SPI_CTL_LOOP;

	if (op & SPI_TRANSFER_LSB)
		ctrl |= SPI_CTL_SB_SEL_LSB;

	ctrl |= SPI_CTL_REQ_ENABLE;

	cfg->spi->ctrl = ctrl;

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;
	spi_context_cs_configure(&spi->ctx);

	return 0;
}
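
/*
 * Common transfer entry point for the sync and async APIs: lock the
 * context, configure the controller, assert chip select (GPIO CS when one
 * is configured, otherwise the controller SS line), then move data chunk
 * by chunk until both buffer sets are drained.
 */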
int transceive(const struct device *dev,
	       const struct spi_config *config,
	       const struct spi_buf_set *tx_bufs,
	       const struct spi_buf_set *rx_bufs,
	       bool asynchronous,
	       struct k_poll_signal *signal)
{
	const struct acts_spi_config *cfg = DEV_CFG(dev);
	struct acts_spi_data *data = DEV_DATA(dev);
	struct acts_spi_controller *spi = cfg->spi;
	int ret;

	if ((!tx_bufs || !tx_bufs->count) && (!rx_bufs || !rx_bufs->count)) {
		return 0;
	}

	spi_context_lock(&data->ctx, asynchronous, signal);

	/* Configure */
	ret = spi_acts_configure(cfg, data, config);
	if (ret) {
		goto out;
	}

	/* Set buffers info */
	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	/* assert chip select */
	if (SPI_OP_MODE_MASTER == SPI_OP_MODE_GET(config->operation)) {
		if (data->ctx.config->cs) {
			spi_context_cs_control(&data->ctx, true);
		} else {
			spi->ctrl &= ~SPI_CTL_SS;
		}
	}

	do {
		ret = spi_acts_transfer_data(cfg, data);
	} while (!ret && spi_acts_transfer_ongoing(data));

	/* deassert chip select */
	if (SPI_OP_MODE_MASTER == SPI_OP_MODE_GET(config->operation)) {
		if (data->ctx.config->cs) {
			spi_context_cs_control(&data->ctx, false);
		} else {
			spi->ctrl |= SPI_CTL_SS;
		}
	}

out:
	spi_context_release(&data->ctx, ret);

	return ret;
}
static int spi_acts_transceive(const struct device *dev,
			       const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_acts_transceive_async(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     struct k_poll_signal *async)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, async);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_acts_release(const struct device *dev,
			    const struct spi_config *config)
{
	struct acts_spi_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}
/* simple smoke test, run once from driver init */
static int spi_test(const struct device *dev)
{
	struct spi_config config = {
		.frequency = 24000000,
		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
		.slave = 0,
	};
	struct spi_buf_set tx_bufs;
	struct spi_buf_set rx_bufs;
	struct spi_buf tx_buf[1];
	struct spi_buf rx_buf[1];
	uint8_t buf_tx[16];
	uint8_t buf_rx[16] = {0};
	int ret;

	memset(buf_tx, 0x86, sizeof(buf_tx));
	printk("spi test:%s\n", dev->name);

	tx_buf[0].buf = buf_tx;
	tx_buf[0].len = 16;
	rx_buf[0].buf = buf_rx;
	rx_buf[0].len = 16;

	rx_bufs.buffers = rx_buf;
	rx_bufs.count = 1;
	tx_bufs.buffers = tx_buf;
	tx_bufs.count = 1;

	ret = spi_transceive(dev, &config, &tx_bufs, &rx_bufs);
	if (ret)
		printk("spi test error\n");
	else
		printk("spi test pass\n");

	printk("buf_rx: 0x%x 0x%x 0x%x 0x%x\n",
	       buf_rx[0], buf_rx[1], buf_rx[14], buf_rx[15]);

	return 0;
}
int spi_acts_init(const struct device *dev)
{
	const struct acts_spi_config *config = DEV_CFG(dev);
	struct acts_spi_data *data = DEV_DATA(dev);
	int chan;

	k_sem_init(&data->dma_sync, 0, 1);

	if (config->flag_use_dma) {
		data->dma_dev = device_get_binding(config->dma_dev_name);
		if (!data->dma_dev) {
			LOG_ERR("dma-dev binding err:%s", config->dma_dev_name);
			return -ENODEV;
		}
		chan = dma_request(data->dma_dev, config->txdma_chan);
		if (chan < 0) {
			LOG_ERR("dma-dev txchan config err chan=%d", config->txdma_chan);
			return -ENODEV;
		}
		data->txdma_chan = chan;
		chan = dma_request(data->dma_dev, config->rxdma_chan);
		if (chan < 0) {
			LOG_ERR("dma-dev rxchan config err chan=%d", config->rxdma_chan);
			return -ENODEV;
		}
		data->rxdma_chan = chan;
	}

	printk("spi:clkreg=0x%x, dma=%d\n", config->spiclk_reg, config->flag_use_dma);

	/* enable spi controller clock */
	acts_clock_peripheral_enable(config->clock_id);

	/* reset spi controller */
	acts_reset_peripheral(config->reset_id);

	spi_context_unlock_unconditionally(&data->ctx);

	spi_test(dev);

	return 0;
}
const struct spi_driver_api spi_acts_driver_api = {
	.transceive = spi_acts_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_acts_transceive_async,
#endif
	.release = spi_acts_release,
};
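
/*
 * Per-instance instantiation macros. The parentheses around the
 * dma_use()/dma_not() bodies are required by COND_CODE_1(), which strips
 * them when it pastes the selected branch into the initializer.
 */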
#define dma_use(n)	(\
	.dma_dev_name = CONFIG_DMA_0_NAME, \
	.txdma_id = CONFIG_SPI_##n##_DMA_ID, \
	.txdma_chan = CONFIG_SPI_##n##_TXDMA_CHAN, \
	.rxdma_id = CONFIG_SPI_##n##_DMA_ID, \
	.rxdma_chan = CONFIG_SPI_##n##_RXDMA_CHAN, \
	.flag_use_dma = 1, \
	)

#define dma_not(n)	(\
	.flag_use_dma = 0, \
	)

#define SPI_ACTS_DEFINE_CONFIG(n)					\
	static const struct acts_spi_config spi_acts_config_##n = {	\
		.spi = (struct acts_spi_controller *)SPI##n##_REG_BASE,	\
		.spiclk_reg = 0,					\
		.clock_id = CLOCK_ID_SPI##n,				\
		.reset_id = RESET_ID_SPI##n,				\
		COND_CODE_1(CONFIG_SPI_##n##_USE_DMA, dma_use(n), dma_not(n)) \
	}
#define SPI_ACTS_DEVICE_INIT(n)						\
	SPI_ACTS_DEFINE_CONFIG(n);					\
									\
	static struct acts_spi_data spi_acts_dev_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_acts_dev_data_##n, ctx),	\
		SPI_CONTEXT_INIT_SYNC(spi_acts_dev_data_##n, ctx),	\
	};								\
									\
	DEVICE_DEFINE(spi_acts_##n,					\
		      CONFIG_SPI_##n##_NAME,				\
		      &spi_acts_init, NULL, &spi_acts_dev_data_##n,	\
		      &spi_acts_config_##n, POST_KERNEL,		\
		      CONFIG_SPI_INIT_PRIORITY, &spi_acts_driver_api);
#if IS_ENABLED(CONFIG_SPI_1)
SPI_ACTS_DEVICE_INIT(1)
#endif
#if IS_ENABLED(CONFIG_SPI_2)
SPI_ACTS_DEVICE_INIT(2)
#endif
#if IS_ENABLED(CONFIG_SPI_3)
SPI_ACTS_DEVICE_INIT(3)
#endif