  1. /*
  2. * Copyright (c) 2018 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <drivers/flash.h>
  7. #include <drivers/spi.h>
  8. #include <logging/log.h>
  9. #include <soc.h>
  10. #include <board_cfg.h>
  11. #include "spi_flash.h"
  12. #include <linker/linker-defs.h>
  13. #include <dvfs.h>
  14. #include <drivers/gpio.h>
  15. LOG_MODULE_REGISTER(spi_flash_acts, CONFIG_FLASH_LOG_LEVEL);
  16. #ifdef CONFIG_SPI_NOR_FLASH_DRV_EXT
  17. extern int spinor_enter_4byte_address_mode(struct spinor_info *sni);
  18. #endif
  19. #if defined(CONFIG_SPI_FLASH_1_GPIO_CS_EN) && (CONFIG_SPI_FLASH_1_GPIO_CS_EN == 1)
  20. static const struct device *spi_gpio_cs_dev;
  21. #endif
  22. #if (CONFIG_SPI_FLASH_SYNC_MULTI_DEV == 1)
  23. static struct k_sem spi_flash_sync = Z_SEM_INITIALIZER(spi_flash_sync, 1, 1);
  24. #endif
  25. #ifdef SPINOR_RESET_FUN_ADDR
  26. typedef void (*spi_reset_func)(struct spi_info *si);
  27. __ramfunc void spi_flash_reset(struct spi_info *si)
  28. {
  29. spi_reset_func func = (spi_reset_func)(SPINOR_RESET_FUN_ADDR);
  30. func(si);
  31. }
  32. #else
  33. __ramfunc void spi_flash_reset(struct spi_info *si)
  34. {
  35. p_spinor_api->continuous_read_reset((struct spinor_info *)si);
  36. }
  37. #endif
  38. __ramfunc void spi_flash_acts_prepare(struct spi_info *si)
  39. {
  40. /* wait for spi ready */
  41. #if !defined(CONFIG_SPI_NOR_FLASH_4B_ADDRESS) || defined(CONFIG_NOR_CODE_IN_RAM)
  42. while(!(sys_read32(SPI_STA(si->base)) & SPI_STA_READY));
  43. spi_flash_reset(si);
  44. #endif
  45. }
  46. __ramfunc void spi_flash_lock_acquire(void)
  47. {
  48. #ifndef CONFIG_NOR_CODE_IN_RAM
  49. #if defined(CONFIG_SPI_FLASH_SYNC_MULTI_DEV) && (CONFIG_SPI_FLASH_SYNC_MULTI_DEV == 1)
  50. if(!k_is_in_isr()){
  51. k_sem_take(&spi_flash_sync, K_FOREVER);
  52. }
  53. #endif
  54. #endif
  55. }
  56. __ramfunc void spi_flash_lock_release(void)
  57. {
  58. #ifndef CONFIG_NOR_CODE_IN_RAM
  59. #if defined(CONFIG_SPI_FLASH_SYNC_MULTI_DEV) && (CONFIG_SPI_FLASH_SYNC_MULTI_DEV == 1)
  60. if(!k_is_in_isr()){
  61. k_sem_give(&spi_flash_sync);
  62. }
  63. #endif
  64. #endif
  65. }
  66. __ramfunc int spi_flash_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
  67. {
  68. struct spinor_info *sni = DEV_DATA(dev);
  69. int ret = 0;
  70. size_t tmplen;
  71. spi_flash_lock_acquire();
  72. tmplen = len;
  73. while(tmplen > 0) {
  74. if(tmplen < 0x8000)
  75. len = tmplen;
  76. else
  77. len = 0x8000;
  78. #if defined(CONFIG_SPI_NOR_FLASH_4B_ADDRESS) || defined(CONFIG_NOR_CODE_IN_RAM)
  79. ret = spinor_4b_addr_op_api.read(sni, offset, data, len);
  80. #else
  81. #if defined(CONFIG_SPI_FLASH_NO_IRQ_LOCK) && (CONFIG_SPI_FLASH_NO_IRQ_LOCK == 1)
  82. ret = p_spinor_api->read(sni, offset, data, len);
  83. #else
  84. uint32_t key = irq_lock();
  85. ret = p_spinor_api->read(sni, offset, data, len);
  86. irq_unlock(key);
  87. #endif
  88. #endif
  89. offset += len;
  90. data = (void *)((unsigned int )data + len);
  91. tmplen -= len;
  92. }
  93. spi_flash_lock_release();
  94. return ret;
  95. }
  96. __ramfunc int spi_flash_acts_write(const struct device *dev, off_t offset, const void *data, size_t len)
  97. {
  98. struct spinor_info *sni = DEV_DATA(dev);
  99. int ret;
  100. spi_flash_lock_acquire();
  101. #if defined(CONFIG_SPI_NOR_FLASH_4B_ADDRESS) || defined(CONFIG_NOR_CODE_IN_RAM)
  102. ret = spinor_4b_addr_op_api.write(sni, offset, data, len);
  103. #else
  104. #if defined(CONFIG_SPI_FLASH_NO_IRQ_LOCK) && (CONFIG_SPI_FLASH_NO_IRQ_LOCK == 1)
  105. uint32_t flag = sni->spi.flag;
  106. uint32_t nor_flag = sni->flag;
  107. sni->flag |= SPINOR_FLAG_UNLOCK_IRQ_WAIT_READY; //unlock wait ready
  108. sni->spi.flag &= ~SPI_FLAG_NO_IRQ_LOCK; //lock
  109. ret = p_spinor_api->write(sni, offset, data, len);
  110. sni->spi.flag = flag;
  111. sni->flag = nor_flag;
  112. #else
  113. uint32_t key = irq_lock();
  114. ret = p_spinor_api->write(sni, offset, data, len);
  115. irq_unlock(key);
  116. #endif
  117. #endif
  118. spi_flash_lock_release();
  119. return ret ;
  120. }
  121. __ramfunc int spi_flash_acts_erase(const struct device *dev, off_t offset, size_t size)
  122. {
  123. struct spinor_info *sni = DEV_DATA(dev);
  124. int ret;
  125. spi_flash_lock_acquire();
  126. #if defined(CONFIG_SPI_NOR_FLASH_4B_ADDRESS) || defined(CONFIG_NOR_CODE_IN_RAM)
  127. ret = spinor_4b_addr_op_api.erase(sni, offset, size);
  128. #else
  129. #if defined(CONFIG_SPI_FLASH_NO_IRQ_LOCK) && (CONFIG_SPI_FLASH_NO_IRQ_LOCK == 1)
  130. uint32_t flag = sni->spi.flag;
  131. uint32_t nor_flag = sni->flag;
  132. sni->flag |= SPINOR_FLAG_UNLOCK_IRQ_WAIT_READY; //unlock wait ready
  133. sni->spi.flag &= ~SPI_FLAG_NO_IRQ_LOCK; //lock
  134. ret = p_spinor_api->erase(sni, offset, size);
  135. sni->spi.flag = flag;
  136. sni->flag = nor_flag;
  137. #else
  138. uint32_t key;
  139. #define NOR_ERASE_SECTOR (4*1024)
  140. ret = 0;
  141. while(size){// erase sector once
  142. key= irq_lock();
  143. ret |= p_spinor_api->erase(sni, offset, NOR_ERASE_SECTOR);
  144. irq_unlock(key);
  145. if(size < NOR_ERASE_SECTOR){
  146. size = 0;
  147. }else{
  148. size -= NOR_ERASE_SECTOR;
  149. offset += NOR_ERASE_SECTOR;
  150. }
  151. }
  152. #endif
  153. #endif
  154. spi_flash_lock_release();
  155. return ret ;
  156. }
  157. static inline void xspi_delay(void)
  158. {
  159. volatile int i = 100000;
  160. while (i--)
  161. ;
  162. }
  163. __ramfunc void xspi_nor_enable_status_qe(struct spinor_info *sni)
  164. {
  165. uint16_t status;
  166. /* MACRONIX's spinor has different QE bit */
  167. if (XSPI_NOR_MANU_ID_MACRONIX == (sni->chipid & 0xff)) {
  168. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  169. if (!(status & 0x40)) {
  170. /* set QE bit to disable HOLD/WP pin function */
  171. status |= 0x40;
  172. p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
  173. (u8_t *)&status, 1);
  174. }
  175. return;
  176. }
  177. /* check QE bit */
  178. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  179. if (!(status & 0x2)) {
  180. /* set QE bit to disable HOLD/WP pin function, for WinBond */
  181. status |= 0x2;
  182. p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
  183. (u8_t *)&status, 1);
  184. /* check QE bit again */
  185. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  186. if (!(status & 0x2)) {
  187. /* oh, let's try old write status cmd, for GigaDevice/Berg */
  188. status = ((status | 0x2) << 8) |
  189. p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  190. p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
  191. (u8_t *)&status, 2);
  192. }
  193. }
  194. xspi_delay();
  195. }
  196. static inline void xspi_setup_bus_width(struct spinor_info *sni, u8_t bus_width)
  197. {
  198. struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
  199. spi->ctrl = (spi->ctrl & ~(0x3 << 10)) | (((bus_width & 0x7) / 2 + 1) << 10);
  200. xspi_delay();
  201. }
  202. static __sleepfunc void xspi_setup_delaychain(struct spinor_info *sni, u8_t ns)
  203. {
  204. struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
  205. spi->ctrl = (spi->ctrl & ~(0xF << 16)) | (ns << 16);
  206. xspi_delay();
  207. }
  208. #if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
  209. extern int nor_test_delaychain(const struct device *dev);
  210. #endif
  211. #if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
  212. extern void nor_dual_quad_read_mode_try(struct spinor_info *sni);
  213. #endif
  214. #ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
  215. struct nor_delaychain_tbl {
  216. uint16_t vdd_volt;
  217. uint8_t delay;
  218. };
  219. #ifdef CONFIG_SPI_NOR_FLASH_4B_ADDRESS
  220. static const struct nor_delaychain_tbl nor_delaychains[] = {
  221. {950, 9},
  222. {1000, 10},
  223. {1100, 10},
  224. {1150, 10},
  225. {1200, 11},
  226. };
  227. #else
  228. static const struct nor_delaychain_tbl nor_delaychains[] = {
  229. {950, 11},
  230. {1000, 10},
  231. {1100, 11},
  232. {1150, 11},
  233. {1200, 13},
  234. };
  235. #endif
  236. static inline void nor_set_delaychain_by_vdd(struct spinor_info *sni, uint16_t vdd)
  237. {
  238. uint8_t i;
  239. for (i = 0; i < ARRAY_SIZE(nor_delaychains); i++) {
  240. if (nor_delaychains[i].vdd_volt == vdd) {
  241. xspi_setup_delaychain(sni, nor_delaychains[i].delay);
  242. break;
  243. }
  244. }
  245. }
  246. __dvfs_notifier_func static void nor_dvfs_notify(void *user_data, struct dvfs_freqs *dvfs_freq)
  247. {
  248. struct spinor_info *sni = (struct spinor_info *)user_data;
  249. struct dvfs_level *old_dvfs_level, *new_dvfs_level;
  250. uint32_t key;
  251. if (!dvfs_freq) {
  252. printk("dvfs notify invalid param");
  253. return ;
  254. }
  255. if (dvfs_freq->old_level == dvfs_freq->new_level)
  256. return ;
  257. key = irq_lock();
  258. old_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->old_level);
  259. new_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->new_level);
  260. if (old_dvfs_level->vdd_volt > new_dvfs_level->vdd_volt) {
  261. /* vdd voltage decrease */
  262. if (dvfs_freq->state == DVFS_EVENT_PRE_CHANGE) {
  263. if (new_dvfs_level->vdd_volt < 1100)
  264. clk_set_rate(CLOCK_ID_SPI0, MHZ(64));
  265. else
  266. clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
  267. nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
  268. printk("nor delaychain update by vdd:%d => %d\n",
  269. old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
  270. }
  271. } else {
  272. /* vdd voltage increase */
  273. if (dvfs_freq->state == DVFS_EVENT_POST_CHANGE) {
  274. nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
  275. if (new_dvfs_level->vdd_volt < 1100)
  276. clk_set_rate(CLOCK_ID_SPI0, MHZ(64));
  277. else
  278. clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
  279. printk("nor delaychain update by vdd:%d => %d\n",
  280. old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
  281. }
  282. }
  283. irq_unlock(key);
  284. }
  285. static struct spinor_info spi_flash_acts_data;
  286. static struct dvfs_notifier __dvfs_notifier_data nor_dvsf_notifier = {
  287. .dvfs_notify_func_t = nor_dvfs_notify,
  288. .user_data = &spi_flash_acts_data,
  289. };
  290. #endif /* CONFIG_ACTS_DVFS_DYNAMIC_LEVEL */
  291. __ramfunc int spi_flash_acts_init(const struct device *dev)
  292. {
  293. struct spinor_info *sni = DEV_DATA(dev);
  294. uint32_t key;
  295. uint8_t status, status2, status3;
  296. sni->spi.prepare_hook = spi_flash_acts_prepare;
  297. key = irq_lock();
  298. sni->chipid = p_spinor_api->read_chipid(sni)& 0xffffff;
  299. printk("read spi nor chipid:0x%x\n", sni->chipid);
  300. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  301. status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  302. status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
  303. printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
  304. #ifdef CONFIG_NOR_SUSPEND_RESUME
  305. #ifndef CONFIG_SPINOR_TEST_DELAYCHAIN
  306. if(status2 & (NOR_STATUS2_SUS1|NOR_STATUS2_SUS2))
  307. spinor_resume_finished(sni);
  308. #endif
  309. #endif
  310. #ifdef CONFIG_SPI_NOR_FLASH_DRV_EXT
  311. void spinor_enable_xip_mode(struct spinor_info *sni);
  312. spinor_enable_xip_mode(sni);
  313. #endif
  314. #if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
  315. nor_dual_quad_read_mode_try(sni);
  316. printk("bus width : %d, and cache read use ", sni->spi.bus_width);
  317. #else
  318. if(sni->spi.bus_width == 4) {
  319. printk("nor is 4 line mode\n");
  320. sni->spi.flag |= SPI_FLAG_SPI_4XIO;
  321. xspi_nor_enable_status_qe(sni);
  322. /* enable 4x mode */
  323. xspi_setup_bus_width(sni, 4);
  324. } else if(sni->spi.bus_width == 2) {
  325. printk("nor is 2 line mode\n");
  326. /* enable 2x mode */
  327. xspi_setup_bus_width(sni, 2);
  328. } else {
  329. sni->spi.bus_width = 1;
  330. printk("nor is 1 line mode\n");
  331. /* enable 1x mode */
  332. xspi_setup_bus_width(sni, 1);
  333. }
  334. #endif
  335. /* setup SPI clock rate */
  336. clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
  337. /* configure delay chain */
  338. xspi_setup_delaychain(sni, sni->spi.delay_chain);
  339. /* check delay chain workable */
  340. sni->chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
  341. printk("read again spi nor chipid:0x%x\n", sni->chipid);
  342. #ifdef CONFIG_SPI_NOR_FLASH_4B_ADDRESS
  343. spinor_enter_4byte_address_mode(sni);
  344. #endif
  345. #if defined(CONFIG_SPI_FLASH_1_GPIO_CS_EN) && (CONFIG_SPI_FLASH_1_GPIO_CS_EN == 1)
  346. spi_gpio_cs_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
  347. if (!spi_gpio_cs_dev) {
  348. printk("failed to get gpio:%d device", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
  349. irq_unlock(key);
  350. return -1;
  351. }
  352. gpio_pin_configure(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, GPIO_OUTPUT);
  353. gpio_pin_set(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, 1);
  354. printk("use GPIO:%d as spi cs pin", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
  355. #endif
  356. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  357. status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  358. status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
  359. printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
  360. #if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
  361. nor_test_delaychain(dev);
  362. #endif
  363. #ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
  364. dvfs_register_notifier(&nor_dvsf_notifier);
  365. #endif
  366. irq_unlock(key);
  367. flash_write_protection_set(dev, true);
  368. #ifdef CONFIG_NOR_SECURIYT_SUSPPORT
  369. spinor_test_uid_securty(dev);
  370. #endif
  371. return 0;
  372. }
  373. #if defined(CONFIG_SPI_FLASH_1_GPIO_CS_EN) && (CONFIG_SPI_FLASH_1_GPIO_CS_EN == 1)
  374. static void spi_flash_acts_cs_gpio(struct spi_info *si, int value)
  375. {
  376. if (spi_gpio_cs_dev) {
  377. gpio_pin_set(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, value ? true : false);
  378. k_busy_wait(1);
  379. }
  380. }
  381. #endif
  382. #ifndef CONFIG_BOARD_NANDBOOT
  383. typedef int (*nor_send_command)(unsigned char cmd);
  384. __sleepfunc void sys_norflash_power_ctrl(uint32_t is_powerdown)
  385. {
  386. nor_send_command p_send_command = (nor_send_command)0x00003759;
  387. volatile int i;
  388. u32_t spi_ctl_ori = sys_read32(SPI0_REG_BASE);
  389. /* If spi mode is not the disable or write only mode, we need to disable firstly */
  390. if (((spi_ctl_ori & 0x3) != 0) && ((spi_ctl_ori & 0x3) != 2)) {
  391. sys_write32(sys_read32(SPI0_REG_BASE) & ~(3 << 0), SPI0_REG_BASE);
  392. for (i = 0; i < 5; i++) {
  393. ;
  394. }
  395. }
  396. /* enable AHB interface for cpu write cmd */
  397. sys_write32(0xa013A, SPI0_REG_BASE);
  398. for(i = 0; i < 20; i++) {
  399. ;
  400. }
  401. if (is_powerdown){
  402. /* 4x io need send 0xFF to exit the continuous mode */
  403. if (spi_ctl_ori & (0x3 << 10))
  404. p_send_command(0xFF);
  405. p_send_command(0xB9);
  406. soc_udelay(5); // max 3us
  407. } else {
  408. p_send_command(0xAB);
  409. soc_udelay(40); // max 30us
  410. }
  411. /* set spi in disable mode */
  412. sys_write32(sys_read32(SPI0_REG_BASE) & ~(3 << 0), SPI0_REG_BASE);
  413. for (i = 0; i < 5; i++) {
  414. ;
  415. }
  416. sys_write32(spi_ctl_ori, SPI0_REG_BASE);
  417. }
  418. #endif
  419. #ifdef CONFIG_SPI_NOR_FLASH_4B_ADDRESS
  420. __sleepfunc void sys_norflash_exit_4b(void)
  421. {
  422. //printk("spinor exit 4-byte address mode\n");
  423. p_spinor_api->write_status(NULL, 0xE9, NULL, 0);
  424. }
  425. __sleepfunc void sys_norflash_enter_4b(void)
  426. {
  427. //printk("spinor enter 4-byte address mode\n");
  428. p_spinor_api->write_status(NULL, 0xB7, NULL, 0);
  429. }
  430. #endif
  431. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  432. static void spi_flash_acts_pages_layout(
  433. const struct device *dev,
  434. const struct flash_pages_layout **layout,
  435. size_t *layout_size)
  436. {
  437. *layout = &(DEV_CFG(dev)->pages_layout);
  438. *layout_size = 1;
  439. }
  440. #endif /* IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT) */
  441. #if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
  442. extern int nor_write_protection(const struct device *dev, bool enable);
  443. #endif
  444. __ramfunc int spi_flash_acts_write_protection(const struct device *dev, bool enable)
  445. {
  446. #if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
  447. spi_flash_lock_acquire();
  448. nor_write_protection(dev, enable);
  449. spi_flash_lock_release();
  450. #else
  451. struct spinor_info *sni = DEV_DATA(dev);
  452. uint8_t status1, status2, sta_en;
  453. uint8_t sta[2];
  454. uint32_t key;
  455. if (sni->chipid == 0x1560c8){//3085 nor
  456. key = irq_lock();
  457. status1 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  458. status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  459. sta_en = (status1 >> 2) & 0x1f; //bp4-bp0 bit6-bit2
  460. if(enable){
  461. if(!sta_en){ // if disable
  462. #define PROTECT_16KB 0x1b
  463. #define PROTECT_64KB 0x9
  464. #define PROTECT_128KB 0xa
  465. #define PROTECT_256KB 0xb
  466. #define PROTECT_512KB 0xc
  467. #define PROTECT_1MB 0xd
  468. sta[0] = status1 | (PROTECT_16KB << 2); //bit6-bit2 = bp4-bp0 =
  469. sta[1] = status2 & (~0x40);
  470. p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS, sta, 2);
  471. printk("enable status1-2: {0x%02x 0x%02x}\n", sta[0], sta[1]);
  472. }
  473. }else{
  474. if(sta_en){ // if enable
  475. sta[0] = status1 & 0x83 ; //bit6-bit2 = bp4-bp0 = 00000 = disable protect
  476. sta[1] = status2 & (~0x40);
  477. p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS, sta, 2);
  478. printk("disable status1-2: {0x%02x 0x%02x}\n", sta[0], sta[1]);
  479. }
  480. }
  481. irq_unlock(key);
  482. }
  483. #endif
  484. return 0;
  485. }
  486. static const struct flash_parameters flash_acts_parameters = {
  487. .write_block_size = 0x1000,
  488. .erase_value = 0xff,
  489. };
  490. static const struct flash_parameters *
  491. spi_flash_get_parameters(const struct device *dev)
  492. {
  493. ARG_UNUSED(dev);
  494. return &flash_acts_parameters;
  495. }
  496. #ifdef CONFIG_PM_DEVICE
  497. int spi_flash_pm_control(const struct device *device, enum pm_device_action action)
  498. {
  499. if(action == PM_DEVICE_ACTION_LATE_RESUME){
  500. sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x1 << 5) , SPICACHE_CTL);
  501. //printk("late reusme = 0x%x\n", sys_read32(SPICACHE_CTL));
  502. }else if(action == PM_DEVICE_ACTION_EARLY_SUSPEND){
  503. sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x2 << 5) , SPICACHE_CTL);
  504. //printk("nor early suspend = 0x%x\n", sys_read32(SPICACHE_CTL));
  505. }
  506. return 0;
  507. }
  508. #else
  509. #define spi_flash_pm_control NULL
  510. #endif
  511. static struct flash_driver_api spi_flash_nor_api = {
  512. .read = spi_flash_acts_read,
  513. .write = spi_flash_acts_write,
  514. .erase = spi_flash_acts_erase,
  515. .write_protection = spi_flash_acts_write_protection,
  516. .get_parameters = spi_flash_get_parameters,
  517. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  518. .page_layout = spi_flash_acts_pages_layout,
  519. #endif
  520. };
  521. /* system XIP spinor */
  522. static struct spinor_info spi_flash_acts_data = {
  523. .spi = {
  524. .base = SPI0_REG_BASE,
  525. .bus_width = CONFIG_SPI_FLASH_BUS_WIDTH,
  526. .delay_chain = CONFIG_SPI_FLASH_DELAY_CHAIN,
  527. #if (CONFIG_SPI_FLASH_1 == 0) && (CONFIG_SPI_FLASH_2 == 0)
  528. #if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
  529. .dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
  530. #endif
  531. #endif
  532. #if defined(CONFIG_SPI_FLASH_NO_IRQ_LOCK) && (CONFIG_SPI_FLASH_NO_IRQ_LOCK == 1)
  533. .flag = SPI_FLAG_NO_IRQ_LOCK,
  534. #else
  535. .flag = 0,
  536. #endif
  537. },
  538. .flag = 0,
  539. };
  540. static const struct spi_flash_acts_config spi_acts_config = {
  541. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  542. .pages_layout = {
  543. .pages_count = CONFIG_SPI_FLASH_CHIP_SIZE/0x1000,
  544. .pages_size = 0x1000,
  545. },
  546. #endif
  547. .chip_size = CONFIG_SPI_FLASH_CHIP_SIZE,
  548. .page_size = 0x1000,
  549. };
  550. #if IS_ENABLED(CONFIG_SPI_FLASH_0)
  551. DEVICE_DEFINE(spi_flash_acts, CONFIG_SPI_FLASH_NAME, &spi_flash_acts_init, spi_flash_pm_control,
  552. &spi_flash_acts_data, &spi_acts_config, PRE_KERNEL_1,
  553. CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &spi_flash_nor_api);
  554. #endif
  555. #if (CONFIG_SPI_FLASH_1 == 1)
  556. /* system XIP spinor */
  557. static struct spinor_info spi_flash_1_acts_data = {
  558. .spi = {
  559. .base = SPI0_REG_BASE,
  560. .bus_width = CONFIG_SPI_FLASH_1_BUS_WIDTH,
  561. .delay_chain = CONFIG_SPI_FLASH_1_DELAY_CHAIN,
  562. #if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
  563. .dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
  564. #endif
  565. #if defined(CONFIG_SPI_FLASH_NO_IRQ_LOCK) && (CONFIG_SPI_FLASH_NO_IRQ_LOCK == 1)
  566. .flag = SPI_FLAG_NO_IRQ_LOCK,
  567. #else
  568. .flag = 0,
  569. #endif
  570. #if defined(CONFIG_SPI_FLASH_1_GPIO_CS_EN) && (CONFIG_SPI_FLASH_1_GPIO_CS_EN == 1)
  571. .set_cs = spi_flash_acts_cs_gpio,
  572. #endif
  573. },
  574. .flag = 0,
  575. };
  576. static const struct spi_flash_acts_config spi_flash_1_acts_config = {
  577. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  578. .pages_layout = {
  579. .pages_count = CONFIG_SPI_FLASH_1_CHIP_SIZE/0x1000,
  580. .pages_size = 0x1000,
  581. },
  582. #endif
  583. .chip_size = CONFIG_SPI_FLASH_1_CHIP_SIZE,
  584. .page_size = 0x1000,
  585. };
  586. DEVICE_DEFINE(spi_flash_1_acts, CONFIG_SPI_FLASH_1_NAME, &spi_flash_acts_init, NULL,
  587. &spi_flash_1_acts_data, &spi_flash_1_acts_config, PRE_KERNEL_1,
  588. CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &spi_flash_nor_api);
  589. #endif
  590. #if IS_ENABLED(CONFIG_SPI_FLASH_2)
  591. const struct spinor_operation_api *g_spi3_nor_api;
  592. static K_MUTEX_DEFINE(flash_2_mutex);
  593. static int spi_flash_2_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
  594. {
  595. struct spinor_info *sni = DEV_DATA(dev);
  596. int ret = 0;
  597. size_t tmplen;
  598. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  599. tmplen = len;
  600. while(tmplen > 0) {
  601. if(tmplen < 0x8000)
  602. len = tmplen;
  603. else
  604. len = 0x8000;
  605. ret = g_spi3_nor_api->read(sni, offset, data, len);
  606. offset += len;
  607. data = (void *)((unsigned int )data + len);
  608. tmplen -= len;
  609. }
  610. k_mutex_unlock(&flash_2_mutex);
  611. return ret;
  612. }
  613. static int spi_flash_2_acts_write(const struct device *dev, off_t offset, const void *data, size_t len)
  614. {
  615. struct spinor_info *sni = DEV_DATA(dev);
  616. int ret;
  617. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  618. ret = g_spi3_nor_api->write(sni, offset, data, len);
  619. k_mutex_unlock(&flash_2_mutex);
  620. return ret ;
  621. }
  622. static int spi_flash_2_acts_erase(const struct device *dev, off_t offset, size_t size)
  623. {
  624. struct spinor_info *sni = DEV_DATA(dev);
  625. int ret;
  626. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  627. ret = g_spi3_nor_api->erase(sni, offset, size);
  628. k_mutex_unlock(&flash_2_mutex);
  629. return ret ;
  630. }
  631. static int spi_flash_2_pwoer(struct spinor_info *sni, bool on)
  632. {
  633. #if IS_ENABLED(CONFIG_SPI_FLASH_2_USE_GPIO_POWER)
  634. int ret;
  635. int gpio_value = CONFIG_SPI_FLASH_2_GPIO_POWER_LEVEL;
  636. const struct device *power_gpio_dev;
  637. uint8_t power_gpio = CONFIG_SPI_FLASH_2_POWER_GPIO % 32;
  638. power_gpio_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
  639. if (!power_gpio_dev) {
  640. LOG_ERR("Failed to bind nor power GPIO(%d:%s)", power_gpio, CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
  641. return -1;
  642. }
  643. ret = gpio_pin_configure(power_gpio_dev, power_gpio, GPIO_OUTPUT);
  644. if (ret) {
  645. LOG_ERR("Failed to config output GPIO:%d", power_gpio);
  646. return ret;
  647. }
  648. if (on) {
  649. /* power on nor */
  650. gpio_pin_set(power_gpio_dev, power_gpio, gpio_value);
  651. } else {
  652. /* power off nor */
  653. gpio_pin_set(power_gpio_dev, power_gpio, !gpio_value);
  654. }
  655. #else
  656. if (on) {
  657. spinor_write_cmd(sni, 0xAB); //exit deep power down
  658. } else {
  659. spinor_write_cmd(sni, 0xB9); // enter deep power down
  660. }
  661. #endif
  662. return 0;
  663. }
  664. #ifdef CONFIG_PM_DEVICE
  665. int spi_flash_2_pm_control(const struct device *device, enum pm_device_action action)
  666. {
  667. struct spinor_info *sni = DEV_DATA(device);
  668. if(action == PM_DEVICE_ACTION_RESUME){
  669. LOG_INF("spi2 nor resume ...\n");
  670. spi_flash_2_pwoer(sni, true);
  671. }else if(action == PM_DEVICE_ACTION_SUSPEND){
  672. LOG_INF("spi2 nor suspend ...\n");
  673. spi_flash_2_pwoer(sni, false);
  674. }
  675. return 0;
  676. }
  677. #else
  678. #define spi_flash_2_pm_control NULL
  679. #endif
  680. static int spi_flash_2_acts_init(const struct device *dev)
  681. {
  682. struct spinor_info *sni = DEV_DATA(dev);
  683. uint8_t status, status2, status3;
  684. printk("spi3 flash init\n");
  685. g_spi3_nor_api = spi3nor_get_api();
  686. /* enable spi3 controller clock */
  687. acts_clock_peripheral_enable(CLOCK_ID_SPI3);
  688. /* reset spi3 controller */
  689. acts_reset_peripheral(RESET_ID_SPI3);
  690. /* setup SPI3 clock rate */
  691. clk_set_rate(CLOCK_ID_SPI3, MHZ(CONFIG_SPI_FLASH_2_FREQ_MHZ));
  692. spi_flash_2_pwoer(sni, true);
  693. sni->chipid = g_spi3_nor_api->read_chipid(sni);
  694. printk("read spi3 nor chipid:0x%x\n", sni->chipid);
  695. status = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  696. status2 = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  697. status3 = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
  698. printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
  699. if(sni->spi.bus_width == 4) {
  700. printk("data nor is 4 line mode\n");
  701. sni->spi.flag |= SPI_FLAG_SPI_4XIO;
  702. /* check QE bit */
  703. if (!(status2 & 0x2)) {
  704. /* set QE bit to disable HOLD/WP pin function, for WinBond */
  705. status2 |= 0x2;
  706. g_spi3_nor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
  707. (u8_t *)&status2, 1);
  708. }
  709. } else if(sni->spi.bus_width == 2) {
  710. printk("data nor is 2 line mode\n");
  711. } else {
  712. sni->spi.bus_width = 1;
  713. printk("data nor is 1 line mode\n");
  714. }
  715. /* check delay chain workable */
  716. sni->chipid = g_spi3_nor_api->read_chipid(sni);
  717. printk("read again spi3 nor chipid:0x%x\n", sni->chipid);
  718. #if (CONFIG_SPI_FLASH_2_CHIP_SIZE >= 0x20000000)
  719. spinor_enter_4byte_address_mode(sni);
  720. #endif
  721. status = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  722. status2 = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
  723. status3 = g_spi3_nor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
  724. printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
  725. #if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
  726. nor_test_delaychain(dev);
  727. #endif
  728. return 0;
  729. }
  730. static struct flash_driver_api spi_flash_2_nor_api = {
  731. .read = spi_flash_2_acts_read,
  732. .write = spi_flash_2_acts_write,
  733. .erase = spi_flash_2_acts_erase,
  734. .get_parameters = spi_flash_get_parameters,
  735. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  736. .page_layout = spi_flash_acts_pages_layout,
  737. #endif
  738. };
  739. static struct spinor_info spi_flash_2_acts_data = {
  740. .spi = {
  741. .base = SPI3_REG_BASE,
  742. .bus_width = CONFIG_SPI_FLASH_2_BUS_WIDTH,
  743. .delay_chain = CONFIG_SPI_FLASH_2_DELAY_CHAIN,
  744. #if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
  745. .dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
  746. #endif
  747. .flag = SPI_FLAG_NO_IRQ_LOCK,
  748. },
  749. .flag = 0,
  750. };
  751. static const struct spi_flash_acts_config spi_flash_2_acts_config = {
  752. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  753. .pages_layout = {
  754. .pages_count = CONFIG_SPI_FLASH_2_CHIP_SIZE/0x1000,
  755. .pages_size = 0x1000,
  756. },
  757. #endif
  758. .chip_size = CONFIG_SPI_FLASH_2_CHIP_SIZE,
  759. .page_size = 0x1000,
  760. };
  761. DEVICE_DEFINE(spi_flash_2_acts, CONFIG_SPI_FLASH_2_NAME, &spi_flash_2_acts_init, spi_flash_2_pm_control,
  762. &spi_flash_2_acts_data, &spi_flash_2_acts_config, POST_KERNEL,
  763. CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, &spi_flash_2_nor_api);
  764. #endif
  765. #ifdef CONFIG_NOR_SECURIYT_SUSPPORT
  766. #define NOR_SE_PAGE_SIZE 256
  767. #define NOR_SE_PAGE_MASK (NOR_SE_PAGE_SIZE-1)
  768. #define NOR_SE_MAX_SIZE_EACH_REGN 1024 /**/
  769. /*security_regn 0-3*/
  770. int spi_flash_security_erase(const struct device *dev, unsigned int security_regn)
  771. {
  772. struct spinor_info *sni = DEV_DATA(dev);
  773. return spinor_erase_security(sni, security_regn<<12);
  774. }
  775. int spi_flash_security_write(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  776. {
  777. unsigned int wlen, unlen;
  778. struct spinor_info *sni = DEV_DATA(dev);
  779. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  780. return -1;
  781. unlen = offset & NOR_SE_PAGE_MASK;
  782. while(len){
  783. if(unlen){
  784. wlen = NOR_SE_PAGE_SIZE - unlen;
  785. if(wlen > len)
  786. wlen = len;
  787. unlen = 0;
  788. }else{
  789. if(len < NOR_SE_PAGE_SIZE)
  790. wlen = len;
  791. else
  792. wlen = NOR_SE_PAGE_SIZE;
  793. }
  794. spinor_write_security(sni, (security_regn<<12)|offset, data, wlen);
  795. data = (unsigned char *)data + wlen;
  796. len -= wlen;
  797. offset += wlen;
  798. }
  799. return 0;
  800. }
  801. int spi_flash_security_read(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  802. {
  803. struct spinor_info *sni = DEV_DATA(dev);
  804. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  805. return -1;
  806. return spinor_read_security(sni, (security_regn<<12)|offset, data, len);
  807. }
  808. int spi_flash_uid_read(const struct device *dev, void *uid, unsigned int len)
  809. {
  810. struct spinor_info *sni = DEV_DATA(dev);
  811. return spinor_read_uid(sni, uid, len);
  812. }
  813. #include <string.h>
  814. static unsigned int g_tmp_buf[256];
  815. void spinor_test_uid_securty(const struct device *dev)
  816. {
  817. unsigned int start_ms,end_ms, i, k;
  818. unsigned int *pb = g_tmp_buf;
  819. spi_flash_uid_read(dev, pb, 16);
  820. printk("uid=0x%x, 0x%x, 0x%x, 0x%x\n", pb[0], pb[1], pb[2], pb[3]);
  821. for(i = 1; i < 4; i++){// test security1-security3
  822. start_ms = k_cycle_get_32();
  823. spi_flash_security_erase(dev, i);
  824. end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
  825. printk("scurity erase %d use=%d ms\n", i, end_ms);
  826. spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  827. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {// check erase ok
  828. if(pb[k] != 0xffffffff){
  829. printk("erase check fail %d : off=0x%x, 0x%x!=0xffffffff\n", i, k*4, pb[k]);
  830. break;
  831. }
  832. }
  833. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {
  834. pb[k] = k + 0x12345600*i;
  835. }
  836. start_ms = k_cycle_get_32();
  837. spi_flash_security_write(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  838. end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
  839. printk("scurity write 1KB %d use=%d ms\n", i, end_ms);
  840. }
  841. for(i = 1; i < 4; i++){
  842. memset(pb, 0, NOR_SE_PAGE_SIZE);
  843. spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  844. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++){
  845. if(pb[k] != k + 0x12345600*i){
  846. printk("scurity read cmp fail:%d,off=0x%x,0x%x!=0x%x\n",i, k*4, pb[k], k + 0x12345600*i);
  847. break;
  848. }
  849. }
  850. }
  851. printk("secutrity test finished\n");
  852. }
  853. #endif