spi_flash_leopard.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873
  1. /*
  2. * Copyright (c) 2018 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <drivers/flash.h>
  7. #include <drivers/spi.h>
  8. #include <logging/log.h>
  9. #include <soc.h>
  10. #include <board_cfg.h>
  11. #include "spi_flash.h"
  12. #include <linker/linker-defs.h>
  13. #include <dvfs.h>
  14. #include <drivers/gpio.h>
  15. /* spinor parameters */
  16. #define SPINOR_WRITE_PAGE_SIZE_BITS 8
  17. #define SPINOR_ERASE_SECTOR_SIZE_BITS 12
  18. #define SPINOR_ERASE_BLOCK_SIZE_BITS 16
  19. #define SPINOR_WRITE_PAGE_SIZE (1 << SPINOR_WRITE_PAGE_SIZE_BITS)
  20. #define SPINOR_ERASE_SECTOR_SIZE (1 << SPINOR_ERASE_SECTOR_SIZE_BITS)
  21. #define SPINOR_ERASE_BLOCK_SIZE (1 << SPINOR_ERASE_BLOCK_SIZE_BITS)
  22. #define SPINOR_WRITE_PAGE_MASK (SPINOR_WRITE_PAGE_SIZE - 1)
  23. #define SPINOR_ERASE_SECTOR_MASK (SPINOR_ERASE_SECTOR_SIZE - 1)
  24. #define SPINOR_ERASE_BLOCK_MASK (SPINOR_ERASE_BLOCK_SIZE - 1)
  25. #define SPINOR_CMD_PROGRAM_ERASE_RESUME 0x7a /* nor resume */
  26. #define SPINOR_CMD_PROGRAM_ERASE_SUSPEND 0x75 /* nor suspend */
  27. #define SPINOR_CMD_READ_STATUS 0x05 /* read status1 */
  28. #define SPIMEM_CMD_ENABLE_WRITE 0x06 /* enable write */
  29. #define SPIMEM_TFLAG_WRITE_DATA 0x08
  30. #define SPINOR_CMD_WR_VOL_CFG 0x81 /*Write volatile Configuration Registe*/
  31. #define SPINOR_CMD_RD_VOL_CFG 0x85 /*read volatile Configuration Registe*/
  32. LOG_MODULE_REGISTER(spi_flash_acts, CONFIG_FLASH_LOG_LEVEL);
  33. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  34. static const struct device *spi_gpio_cs_dev;
  35. unsigned int nor_cs0_size, nor_cs1_size;
  36. unsigned char nor_cs0_delaytran, nor_cs1_delaytran;
/* Toggle the GPIO used as chip-select for the second NOR (CS1).
 * value != 0 drives the pin high, value == 0 drives it low
 * (NOTE(review): assumes an active-low CS wired to this pin — confirm).
 * No-op until spi_gpio_cs_dev has been initialized. Runs from RAM so it is
 * safe while the XIP NOR is busy.
 */
__ramfunc static void spi_flash_acts_cs_gpio(struct spi_info *si, int value)
{
    if (spi_gpio_cs_dev) {
        if (value) {
            sys_write32(GPIO_BIT(CONFIG_SPI_FLASH_1_GPIO_CS_PIN), GPION_BSR(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
        } else {
            sys_write32(GPIO_BIT(CONFIG_SPI_FLASH_1_GPIO_CS_PIN), GPION_BRR(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
        }
    }
}
  47. __ramfunc void spi_flash1_cs_select(struct spinor_info *sni, int select)
  48. {
  49. if(select){
  50. if(nor_cs1_size > 0x1000000)
  51. sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  52. else
  53. sni->flag &= ~SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  54. sni->spi.set_cs = spi_flash_acts_cs_gpio;
  55. sni->spi.delay_chain = nor_cs1_delaytran;
  56. }else{
  57. if(nor_cs0_size > 0x1000000)
  58. sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  59. else
  60. sni->flag &= ~SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  61. sni->spi.set_cs = NULL;
  62. sni->spi.delay_chain = nor_cs0_delaytran;
  63. }
  64. }
/* for delaytran scan */
/* Install a new delay chain for whichever chip the offset falls on and
 * return the previous value. Offsets below nor_cs0_size select chip 0.
 * NOTE(review): the chip-1 path only updates the cached nor_cs1_delaytran;
 * the live sni->spi.delay_chain is refreshed by the next
 * spi_flash1_cs_select(sni, 1) — confirm that is the intended timing.
 */
unsigned char spi_flash_set_delaytran(const struct device *dev, off_t offset, unsigned char delaytran)
{
    unsigned char old;
    struct spinor_info *sni = DEV_DATA(dev);

    if (offset < nor_cs0_size) {
        old = nor_cs0_delaytran;
        nor_cs0_delaytran = delaytran;
        sni->spi.delay_chain = nor_cs0_delaytran;
    } else {
        old = nor_cs1_delaytran;
        nor_cs1_delaytran = delaytran;
    }
    return old;
}
  80. #else
  81. /*for delaytran scan */
  82. unsigned char spi_flash_set_delaytran(const struct device *dev, off_t offset, unsigned char delaytran)
  83. {
  84. unsigned char old;
  85. struct spinor_info *sni = DEV_DATA(dev);
  86. old = sni->spi.delay_chain;
  87. sni->spi.delay_chain = delaytran;
  88. return old;
  89. }
  90. #endif
/* Force the NOR out of continuous/burst read mode so the next byte on the
 * bus is decoded as a command (delegates to the ROM driver).
 */
__sleepfunc void spi_flash_exit_continuous(struct spi_info *si)
{
    p_spinor_api->continuous_read_reset((struct spinor_info *)si);
}
/* Called before direct command-mode access to the NOR: waits for the SPI
 * controller to go idle, then forces the flash out of continuous-read mode
 * (skipped when the flag says it is unnecessary, or when running in QPI
 * mode, where the 0xFF exit sequence does not apply).
 */
__sleepfunc void spi_flash_acts_prepare(struct spi_info *si)
{
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    /* Temporarily drop the GPIO CS hook so the reset sequence targets CS0. */
    void *bak_cs;
    bak_cs = si->set_cs;
    si->set_cs = NULL;
#endif
    /* wait for spi ready */
    while (!(sys_read32(SPI_STA(si->base)) & SPI_STA_READY)) {
    }
    if (!(si->flag & SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ)) {
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        if (!(si->flag & SPI_FLAG_QPI_MODE))
#endif
            spi_flash_exit_continuous(si);
    }
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    if (bak_cs != NULL)
        si->set_cs = bak_cs;
#endif
}
  116. #ifdef CONFIG_SPI0_NOR_DTR_MODE
  117. __ramfunc static void spi0_dtr_set_clk(uint32_t rate_hz)
  118. {
  119. uint32_t core_pll, div, real_rate, val;
  120. core_pll = MHZ(((sys_read32(COREPLL_CTL)&0x3F)*8));
  121. div = (core_pll+rate_hz-1)/rate_hz;
  122. real_rate = core_pll/div;
  123. val = (div-1)|(1<<8) | (1<<12);
  124. sys_write32(val, CMU_SPI0CLK);
  125. }
/* Switch SPI0 into DTR (double transfer rate) mode: set the controller's DTR
 * bit, adjust the delaychain dummy-cycle field, and program the clock at 2x
 * the target MHz. Reading the chip id afterwards also forces the flash out
 * of continuous-read mode; returns the 24-bit JEDEC id.
 */
__ramfunc static unsigned int spi0_cache_enter_dtr_mode(struct spinor_info *sni, uint32_t clk_mhz)
{
    struct acts_spi_reg *spi = (struct acts_spi_reg *)sni->spi.base;

    spi->ctrl |= 1 << 19;      /* controller DTR enable */
    spi->delaychain |= 1 << 8; /* 10 dummy (NOTE(review): per vendor register doc — confirm) */
    //sys_set_bit(CMU_SPI0CLK, 12); // CLKD DDR MODE
    spi0_dtr_set_clk(MHZ(clk_mhz) * 2);
    return p_spinor_api->read_chipid(sni) & 0xffffff; // read chipid to exit continuous mode
}
  135. #if 0
  136. __ramfunc static unsigned int spi0_cache_exit_dtr_mode(struct spinor_info *sni)
  137. {
  138. //struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
  139. //spi->ctrl &= ~(1<<19);
  140. //sys_clear_bit(CMU_SPI0CLK, 12);
  141. return 0;
  142. }
  143. #endif
  144. #endif
  145. #ifdef CONFIG_SPI0_NOR_QPI_MODE
  146. #define SPIMEM_TFLAG_MIO_CMD_ADDR_DATA 0x04
/* Send the QPI-enable command to the flash (only meaningful on a 4-bit bus).
 * The command itself goes out in single-line format (flags == 0).
 * NOTE(review): SPI_FLAG_QPI_MODE is cleared afterwards — the flag appears
 * to describe the command format the ROM driver should use for subsequent
 * transfers rather than the flash's current mode; confirm against the ROM
 * driver's transfer() implementation.
 */
__ramfunc static void xspi_nor_enable_qpi(struct spinor_info *sni)
{
    if (sni->spi.bus_width != 4)
        return;
    p_spinor_api->transfer(sni, XSPI_NOR_CMD_QPI_ENABLE, 0, 0, NULL, 0, 0, 0);
    sni->spi.flag &= ~SPI_FLAG_QPI_MODE;
}
/* Send the QPI-disable command. Because the flash is still in QPI mode the
 * command must itself be transmitted with cmd/addr/data on 4 lines
 * (SPIMEM_TFLAG_MIO_CMD_ADDR_DATA). Only meaningful on a 4-bit bus.
 * NOTE(review): SPI_FLAG_QPI_MODE is set afterwards, mirroring the inverse
 * bookkeeping in xspi_nor_enable_qpi — confirm the intended flag polarity.
 */
__ramfunc static void xspi_nor_disable_qpi(struct spinor_info *sni)
{
    if (sni->spi.bus_width != 4)
        return;
    p_spinor_api->transfer(sni, XSPI_NOR_CMD_QPI_DISABLE, 0, 0, NULL, 0, 0, SPIMEM_TFLAG_MIO_CMD_ADDR_DATA);
    sni->spi.flag |= SPI_FLAG_QPI_MODE;
}
/* One-time QPI bring-up for a 4-bit bus: enter QPI on the flash, program its
 * read-parameter register (value 2<<4 — per the original comment this sets
 * 6 dummy clocks) while issuing the command in 4-line format, then turn on
 * the controller's hardware QPI (bit 17) or DTR-QPI (bit 18) mode.
 */
__ramfunc static void xspi_nor_qpi_init(struct spinor_info *sni)
{
    struct acts_spi_reg *spi = (struct acts_spi_reg *)sni->spi.base;

    if (sni->spi.bus_width == 4) {
        xspi_nor_enable_qpi(sni);
        sni->spi.flag |= SPI_FLAG_QPI_MODE;
        p_spinor_api->transfer(sni, XSPI_NOR_CMD_SETPARA_QPI, 2 << 4, 1, NULL, 0, 0, SPIMEM_TFLAG_MIO_CMD_ADDR_DATA); // set 6 dummy clk
        sni->spi.flag &= ~SPI_FLAG_QPI_MODE;
#ifdef CONFIG_SPI0_NOR_DTR_MODE
        spi->ctrl |= 1 << 18; // spi 0 enable dtr qpi
#else
        spi->ctrl |= 1 << 17; // spi 0 enable qpi
#endif
    }
}
  176. #endif
  177. static volatile unsigned int xip_lock_cnt = 0;
  178. void spi0_nor_xip_lock(void)
  179. {
  180. uint32_t key;
  181. key= irq_lock();
  182. xip_lock_cnt++;
  183. irq_unlock(key);
  184. if(xip_lock_cnt > 20){
  185. printk("spi0_nor_xip_lock err=%d\n", xip_lock_cnt);
  186. k_panic();
  187. }
  188. }
  189. void spi0_nor_xip_unlock(void)
  190. {
  191. uint32_t key;
  192. key= irq_lock();
  193. if(xip_lock_cnt){
  194. xip_lock_cnt--;
  195. }else{
  196. printk("spi0_nor_xip_unlock err\n");
  197. k_panic();
  198. }
  199. irq_unlock(key);
  200. }
  201. #define SPINOR_FUN_READ 0
  202. #define SPINOR_FUN_WRITE 1
  203. #define SPINOR_FUN_ERASE 2
  204. #define SPINOR_FUN_SUSPEND 3
/* Spin until no one holds the XIP lock, then return with interrupts locked;
 * the caller must pass the returned key to irq_unlock(). 'fun' is a
 * SPINOR_FUN_* id used only in diagnostics. In thread context we sleep 2ms
 * between polls; in ISR context (panic path) we busy-wait once and give up.
 * NOTE(review): the ISR branch breaks out AFTER irq_unlock(key), so the
 * caller's later irq_unlock(key) is unbalanced — presumably tolerable on
 * the panic path; confirm.
 */
static unsigned int spinor_xip_lock_check(int fun)
{
    unsigned int key, tcnt = 0;

    while (1) {
        key = irq_lock();
        if (xip_lock_cnt == 0)
            break;
        irq_unlock(key);
        if (!k_is_in_isr()) {
            k_msleep(2);
        } else {
            printk("nor w/e in irq\n");
            soc_udelay(5000); // wait master transfer finished
            break;
        }
        tcnt += 2;
        if (tcnt > 1000) {
            printk("err: nor xip check=%d over 1S\n", fun);
            tcnt = 0;
            if (k_is_in_isr()) // if panic, over 1s, break to save ramdump
                xip_lock_cnt = 0;
        }
    }
    return key;
}
  230. #ifdef CONFIG_SPI_XIP_READ
  231. static void spi_flash_xip_init(void)
  232. {
  233. int err = soc_memctrl_mapping(CONFIG_SPI_XIP_VADDR, 0 , 0);
  234. if (err) {
  235. LOG_ERR(" flash xip map fail %d\n", err);
  236. }else{
  237. LOG_INF("flash xip map ok v=0x%x\n", CONFIG_SPI_XIP_VADDR);
  238. }
  239. }
  240. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
/* Two-chip read path (XIP build): offsets below nor_cs0_size are served
 * straight from the chip-0 XIP mapping; anything above is rebased and read
 * from chip 1 via the ROM driver, 32KB per critical section so interrupts
 * are not held for long. 'len' is reused as the per-chunk length while
 * 'tmplen' tracks the remainder.
 */
static int spi_flash_acts_read(const struct device *dev, uint64_t offset, void *data, uint64_t len)
{
    unsigned int xip_start;
    struct spinor_info *sni = DEV_DATA(dev);
    int ret = 0;
    size_t tmplen;
    uint32_t key;

    if (offset >= nor_cs0_size) {
        offset -= nor_cs0_size; /* rebase into chip 1 */
        tmplen = len;
        while (tmplen > 0) {
            if (tmplen < 0x8000)
                len = tmplen;
            else
                len = 0x8000;
            key = spinor_xip_lock_check(SPINOR_FUN_READ);
            spi_flash1_cs_select(sni, 1);
            ret = p_spinor_api->read(sni, offset, data, len);
            spi_flash1_cs_select(sni, 0);
            irq_unlock(key);
            offset += len;
            data = (void *)((unsigned int)data + len);
            tmplen -= len;
        }
    } else {
        /* NOTE(review): assumes a request never straddles the chip-0/chip-1
         * boundary — confirm callers guarantee this. */
        xip_start = CONFIG_SPI_XIP_VADDR + offset;
        pbrom_libc_api->p_memcpy(data, (void *)xip_start, len);
    }
    return ret;
}
  271. #else
  272. #define XIP_NOR_MAX_LEN 0x2000000 // xip only support 32MB
  273. static int spi_flash_acts_read(const struct device *dev, uint64_t offset, void *data, uint64_t len)
  274. {
  275. unsigned int xip_start, key;
  276. size_t tlen;
  277. int ret;
  278. struct spinor_info *sni = DEV_DATA(dev);
  279. if(offset < XIP_NOR_MAX_LEN){
  280. xip_start = CONFIG_SPI_XIP_VADDR + offset;
  281. if(offset+len > XIP_NOR_MAX_LEN){
  282. tlen = XIP_NOR_MAX_LEN - offset;
  283. pbrom_libc_api->p_memcpy(data, (void *)xip_start, tlen);
  284. offset = XIP_NOR_MAX_LEN;
  285. len -= tlen;
  286. data =(void *)((unsigned int)data + tlen);
  287. }else{
  288. pbrom_libc_api->p_memcpy(data, (void *)xip_start, len);
  289. return 0;
  290. }
  291. }
  292. while(len) {
  293. if(len < 0x8000)
  294. tlen = len;
  295. else
  296. tlen = 0x8000;
  297. key = spinor_xip_lock_check(SPINOR_FUN_READ);
  298. ret = p_spinor_api->read(sni, offset, data, tlen);
  299. irq_unlock(key);
  300. offset += tlen;
  301. data = (void *)((unsigned int )data + tlen);
  302. len -= tlen;
  303. }
  304. return 0;
  305. }
  306. #endif
  307. #else
/* Non-XIP read path: read through the ROM driver in 32KB chunks, each inside
 * the XIP lock. With the 2CS option, offsets >= nor_cs0_size are rebased to
 * chip 1 and CS1 is asserted around each chunk. QPI mode is dropped for the
 * transfer and re-enabled afterwards when configured. 'len' is reused as the
 * per-chunk size while 'tmplen' tracks the remainder.
 */
__ramfunc int spi_flash_acts_read(const struct device *dev, uint64_t offset, void *data, uint64_t len)
{
    struct spinor_info *sni = DEV_DATA(dev);
    int ret = 0;
    size_t tmplen;
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    int cs_sel;
    if (offset < nor_cs0_size) {
        cs_sel = 0;
    } else {
        cs_sel = 1;
        offset -= nor_cs0_size;
    }
#endif
    tmplen = len;
    while (tmplen > 0) {
        if (tmplen < 0x8000)
            len = tmplen;
        else
            len = 0x8000;
        uint32_t key = spinor_xip_lock_check(SPINOR_FUN_READ);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spi_flash1_cs_select(sni, 1);
        }
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_disable_qpi(sni);
#endif
        ret = p_spinor_api->read(sni, offset, data, len);
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_enable_qpi(sni);
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spi_flash1_cs_select(sni, 0);
        }
#endif
        irq_unlock(key);
        offset += len;
        data = (void *)((unsigned int)data + len);
        tmplen -= len;
    }
    return ret;
}
  353. #endif
  354. #ifdef CONFIG_NOR_SUSPEND_RESUME
/*
 * XT25F64F errata: suspend & resume can flip bits at address 0x0. To work
 * around the IC bug, read the erase address once before issuing the resume
 * command, and send the resume command WITHOUT the preceding
 * exit-continuous-read sequence (the two 0xFF bytes).
 */
  359. #define XT25F64F_CHIPID 0x17400b
/* Issue program/erase suspend and poll until the flash reports ready (WIP
 * bit of status-1 clear). The suspend command is retried up to 3 times;
 * each attempt waits the 30us tSUS then polls for up to 100 * 5us.
 */
__ramfunc static void spinor_suspend(struct spinor_info *sni)
{
    int i, j;

    // program/erase suspend
    for (j = 0; j < 3; j++) {
        p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_SUSPEND);
        soc_udelay(30);
        for (i = 0; i < 100; i++) { // max 500us; tSUS must be >= 30us
            soc_udelay(5);
            if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)) {
                break;
            }
        }
        if (i != 100) { // ready seen -> suspend took effect
            break;
        }
    }
}
/* Resume a suspended program/erase and check whether it has finished.
 * Returns true when the flash goes idle (WIP clear); returns false when it
 * is still busy, in which case it is suspended again before returning so
 * XIP code can keep running. 'addr' is the erase/program address, needed
 * for the XT25F64F workaround: that part must see a read of the address
 * before resume, and the resume command must not be preceded by the
 * exit-continuous sequence.
 */
__ramfunc static bool spinor_resume_and_check_idle(struct spinor_info *sni, unsigned int addr)
{
    bool ret;
    uint32_t key, i;
    char tmp[8];

    key = spinor_xip_lock_check(SPINOR_FUN_SUSPEND);
    // program/erase resume
    if (sni->chipid == XT25F64F_CHIPID) { /* fix XT25F64F nor bug */
        p_spinor_api->read(sni, addr, tmp, 4);
        sni->spi.flag |= SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
        p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
        sni->spi.flag &= ~SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
    } else {
        p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
    }
    soc_udelay(30);
    for (i = 0; i < 100; i++) { // wait for the suspend bit (status-2 bit7) to clear
        soc_udelay(5);
        if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2) & 0x80)) {
            break;
        }
    }
    if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)) {
        ret = true; // WIP clear: the operation already finished
    } else {
        for (i = 0; i < 20; i++) { // allow up to another 1000us of work
            soc_udelay(50);
            if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)) {
                break;
            }
        }
        if (i != 20) {
            ret = true;
        } else {
            spinor_suspend(sni); // still busy: re-suspend and report not idle
            ret = false;
        }
    }
    irq_unlock(key);
    return ret;
}
/* Repeatedly resume the suspended erase/program until it completes. Each
 * spinor_resume_and_check_idle() attempt takes at least ~500us, so the
 * 20000-iteration cap gives a >= 10 second overall timeout (the original
 * "2000*500us = 10000ms" comment understated the iteration count). On
 * timeout, log and busy-wait for the WIP bit to clear as a last resort.
 */
__ramfunc static void spinor_wait_finished(struct spinor_info *sni, unsigned int addr)
{
    int i;
    uint32_t key;

    for (i = 0; i < 20000; i++) {
        if (spinor_resume_and_check_idle(sni, addr))
            break;
        if (!k_is_in_isr()) {
            if ((i & 0x1) == 0)
                k_msleep(1); // yield every other attempt
        }
    }
    if (i == 20000) {
        LOG_INF("nor resume error\n");
        key = spinor_xip_lock_check(SPINOR_FUN_SUSPEND);
        while (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1); // wait nor ready
        irq_unlock(key);
    }
}
  438. void spinor_resume_finished(struct spinor_info *sni);
/* ISR/panic-path helper: if an erase is currently suspended
 * (SPINOR_FLAG_NO_WAIT_READY set) and we are inside an ISR, resume it and
 * wait for completion before allowing a new write/erase. No-op in thread
 * context.
 */
static void spi_flash_suspend_finished(struct spinor_info *sni)
{
    if (!k_is_in_isr())
        return;
    if (sni->flag & SPINOR_FLAG_NO_WAIT_READY) {
        spinor_resume_finished(sni);
        sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
    }
}
  448. K_MUTEX_DEFINE(spinor_w_mutex);
  449. static void spi_flash_w_lock(void)
  450. {
  451. if(!k_is_in_isr()){
  452. k_mutex_lock(&spinor_w_mutex, K_FOREVER);
  453. }
  454. }
  455. static void spi_flash_w_unlock(void)
  456. {
  457. if(!k_is_in_isr()){
  458. k_mutex_unlock(&spinor_w_mutex);
  459. }
  460. }
/* Resume a suspended program/erase and busy-wait until the flash is idle
 * (WIP bit clear). Used on paths that cannot sleep.
 */
__ramfunc void spinor_resume_finished(struct spinor_info *sni)
{
    LOG_INF("nor is suspend, wait resume finished\n");
    p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
    soc_udelay(5);
    while (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1); // wait nor ready
}
  468. #else // CONFIG_NOR_SUSPEND_RESUME must NOT define
  469. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
/* Poll chip 1 (CS1) until its WIP bit clears. Each status read is done
 * under the XIP lock with CS1 selected; between polls we sleep 2ms in
 * thread context or busy-wait 2ms in ISR context. Always returns 0.
 */
__ramfunc static int spinor_2cs_wait_ready(struct spinor_info *sni)
{
    unsigned char status;
    uint32_t key;

    while (1) {
        key = spinor_xip_lock_check(SPINOR_FUN_WRITE);
        spi_flash1_cs_select(sni, 1);
        status = p_spinor_api->read_status(sni, SPINOR_CMD_READ_STATUS);
        spi_flash1_cs_select(sni, 0);
        irq_unlock(key);
        if (!(status & 0x1))
            break;
        if (!k_is_in_isr()) {
            k_msleep(2);
        } else {
            soc_udelay(2000);
        }
    }
    return 0;
}
/* ISR-path guard for chip 0: if a chip-1 erase was left outstanding
 * (SPINOR_FLAG_NO_WAIT_READY set), busy-wait 100ms to let it finish before
 * chip 0 is accessed. No-op outside ISR context.
 */
__ramfunc void spinor_cs0_check_irq(struct spinor_info *sni)
{
    if (k_is_in_isr()) {
        if (sni->flag & SPINOR_FLAG_NO_WAIT_READY) {
            soc_udelay(100000); // delay 100ms for nor2 erase timer
            sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
        }
    }
}
  499. #endif //end #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  500. #endif // endif #ifdef CONFIG_NOR_SUSPEND_RESUME
  501. #ifdef CONFIG_ACTIONS_PRINTK_DMA
  502. extern int check_panic_exe(void);
  503. #endif
  504. static int spi_flash_not_wr(void)
  505. {
  506. #ifdef CONFIG_ACTIONS_PRINTK_DMA
  507. if (k_is_in_isr() && !check_panic_exe()) {
  508. printk("flash not allow write in irq\n");
  509. k_panic();
  510. return 1;
  511. }
  512. #endif
  513. return 0;
  514. }
/* Write 'len' bytes at 'offset', one 256-byte page per XIP-lock critical
 * section. With 2CS, offsets >= nor_cs0_size are rebased to chip 1 and CS1
 * is asserted around each page. Refuses to run from a non-panic ISR. The
 * XIP cache is invalidated afterwards for chip-0 writes. Returns the last
 * ROM-driver result (NOTE(review): assumed 0 on success — confirm).
 */
__ramfunc int spi_flash_acts_write(const struct device *dev, uint64_t offset, const void *data, uint64_t len)
{
    struct spinor_info *sni = DEV_DATA(dev);
    int ret = 0;
    int wlen;
    uint32_t key;
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    int cs_sel;
    if (offset < nor_cs0_size) {
        cs_sel = 0;
        spinor_cs0_check_irq(sni);
    } else {
        cs_sel = 1;
        offset -= nor_cs0_size;
        spinor_2cs_wait_ready(sni); /* chip 1 must be idle before writing */
    }
#endif
    if (spi_flash_not_wr())
        return -1;
#ifdef CONFIG_NOR_SUSPEND_RESUME
    spi_flash_w_lock();
    spi_flash_suspend_finished(sni); /* finish any suspended erase first */
#endif
    while (len > 0) {
        /* Clamp to one program page (256B) per transfer. */
        if (len > SPINOR_WRITE_PAGE_SIZE)
            wlen = SPINOR_WRITE_PAGE_SIZE;
        else
            wlen = len;
        key = spinor_xip_lock_check(SPINOR_FUN_WRITE);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spi_flash1_cs_select(sni, 1);
        }
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_disable_qpi(sni);
#endif
#ifdef CONFIG_SPI0_NOR_DTR_MODE
        //if(sni->flag & SPI_FLAG_WR_4IO)
        // spi0_cache_exit_dtr_mode(sni);
#endif
        ret = p_spinor_api->write(sni, offset, data, wlen);
#ifdef CONFIG_SPI0_NOR_DTR_MODE
        //if(sni->flag & SPI_FLAG_WR_4IO)
        //spi0_cache_enter_dtr_mode(sni);
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_enable_qpi(sni);
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spi_flash1_cs_select(sni, 0);
        }
#endif
        irq_unlock(key);
        offset += wlen;
        data = (void *)((unsigned int)data + wlen);
        len -= wlen;
    }
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    if (!cs_sel) { /* only chip 0 is XIP-mapped */
#endif
#ifdef CONFIG_SPI_XIP_READ
        soc_memctrl_cache_invalid();
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    }
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
    spi_flash_w_unlock();
#endif
    return ret;
}
/* Erase 'size' bytes starting at 'offset' using 4KB sectors, switching to
 * 64KB block erase when enabled and the offset is block-aligned with enough
 * remaining. With CONFIG_NOR_SUSPEND_RESUME each erase is suspended right
 * away and completed via resume polling so XIP reads can interleave
 * (disabled for >= 256KB requests or in ISR/panic context). With 2CS,
 * offsets >= nor_cs0_size are rebased to chip 1 (CS1). Returns the last
 * ROM-driver result.
 */
__ramfunc int spi_flash_acts_erase(const struct device *dev, uint64_t offset, uint64_t size)
{
    struct spinor_info *sni = DEV_DATA(dev);
    int ret = 0;
    uint32_t key;
    size_t erase_size = SPINOR_ERASE_SECTOR_SIZE;
    uint32_t t0, t1, t2;
    int use_block = 0;

    if (spi_flash_not_wr())
        return -1;
#ifndef CONFIG_SPINOR_TEST_DELAYCHAIN
    LOG_INF("nor_e:offset=0x%llx,len=0x%llx\n", offset, size);
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
    bool b_suspend = true;
    use_block = 1;
    if ((size >= SPINOR_ERASE_BLOCK_SIZE*4) || (k_is_in_isr())) // erase 256kb or panic: no suspend & resume
        b_suspend = false;
    spi_flash_w_lock();
    spi_flash_suspend_finished(sni);
    if (b_suspend)
        sni->flag |= SPINOR_FLAG_NO_WAIT_READY;
#else
    if (size >= SPINOR_ERASE_BLOCK_SIZE*4) // erase 256kb: use block erase
        use_block = 1;
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    int cs_sel;
    if (offset < nor_cs0_size) {
        cs_sel = 0;
        spinor_cs0_check_irq(sni);
    } else {
        cs_sel = 1;
        offset -= nor_cs0_size;
        spinor_2cs_wait_ready(sni);
        use_block = 1;
    }
#endif
    while (size > 0) {
        if (use_block) {
            /* 64KB block erase only when aligned and enough remains. */
            if (size < SPINOR_ERASE_BLOCK_SIZE) {
                erase_size = SPINOR_ERASE_SECTOR_SIZE;
            } else if (offset & SPINOR_ERASE_BLOCK_MASK) {
                erase_size = SPINOR_ERASE_SECTOR_SIZE;
            } else {
                erase_size = SPINOR_ERASE_BLOCK_SIZE;
            }
        }
        key = spinor_xip_lock_check(SPINOR_FUN_ERASE);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            sni->flag |= SPINOR_FLAG_NO_WAIT_READY;
            spi_flash1_cs_select(sni, 1);
        }
#endif
        t0 = k_cycle_get_32();
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_disable_qpi(sni);
#endif
        ret = p_spinor_api->erase(sni, offset, erase_size);
#ifdef CONFIG_SPI0_NOR_QPI_MODE
        xspi_nor_enable_qpi(sni);
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
        if (b_suspend)
            spinor_suspend(sni); /* pause the erase so XIP can run */
#endif
#ifdef CONFIG_SPINOR_TEST_DELAYCHAIN
        soc_udelay(100000); // try fail: nor status may not be finished; wait out the erase
#endif
        t1 = k_cycle_get_32();
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spi_flash1_cs_select(sni, 0);
        }
#endif
        irq_unlock(key);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
        if (cs_sel) {
            spinor_2cs_wait_ready(sni);
            sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
        }
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
        if (b_suspend)
            spinor_wait_finished(sni, offset); /* resume & poll to completion */
#endif
        t2 = k_cycle_get_32();
#ifndef CONFIG_SPINOR_TEST_DELAYCHAIN
        LOG_INF("nor_e:off=0x%x,len=0x%x, tran=%d us, wait=%d\n", (uint32_t)offset, erase_size,
            k_cyc_to_us_ceil32(t1-t0), k_cyc_to_us_ceil32(t2-t1));
#endif
        size -= erase_size;
        offset += erase_size;
    }
#ifdef CONFIG_SPI_XIP_READ
    soc_memctrl_cache_invalid();
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
    sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
    spi_flash_w_unlock();
#endif
    return ret;
}
  692. static __ramfunc void xspi_delay(void)
  693. {
  694. volatile int i = 100000;
  695. while (i--)
  696. ;
  697. }
/* Ensure the flash's Quad-Enable bit is set so the HOLD#/WP# pins act as
 * IO2/IO3. Macronix keeps QE at bit 6 of status-1; other vendors
 * (Winbond-style) use bit 1 of status-2, with a fallback two-byte status-1
 * write for older GigaDevice/Berg parts that lack the separate status-2
 * write command.
 */
__ramfunc void xspi_nor_enable_status_qe(struct spinor_info *sni)
{
    uint16_t status;

    /* MACRONIX's spinor has different QE bit */
    if (XSPI_NOR_MANU_ID_MACRONIX == (sni->chipid & 0xff)) {
        status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
        if (!(status & 0x40)) {
            /* set QE bit to disable HOLD/WP pin function */
            status |= 0x40;
            p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
                (u8_t *)&status, 1);
        }
        return;
    }
    /* check QE bit */
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    if (!(status & 0x2)) {
        /* set QE bit to disable HOLD/WP pin function, for WinBond */
        status |= 0x2;
        p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
            (u8_t *)&status, 1);
        /* check QE bit again */
        status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
        if (!(status & 0x2)) {
            /* fall back to the old 2-byte write-status cmd, for GigaDevice/Berg */
            status = ((status | 0x2) << 8) |
                p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
            p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
                (u8_t *)&status, 2);
        }
    }
    xspi_delay();
}
/* Program the controller's IO-width field (ctrl bits 10-11) from bus_width:
 * 1 -> 1 (single), 2 -> 2 (dual), 4 -> 3 (quad), via (width & 7) / 2 + 1.
 */
static __ramfunc void xspi_setup_bus_width(struct spinor_info *sni, u8_t bus_width)
{
    struct acts_spi_reg *spi = (struct acts_spi_reg *)sni->spi.base;

    spi->ctrl = (spi->ctrl & ~(0x3 << 10)) | (((bus_width & 0x7) / 2 + 1) << 10);
    xspi_delay();
}
/* Program the 6-bit delay-chain field (bits 0-5) of the SPI delaychain
 * register; must be callable from sleep code, hence __sleepfunc.
 */
static __sleepfunc void xspi_setup_delaychain(struct spinor_info *sni, u8_t delay)
{
    struct acts_spi_reg *spi = (struct acts_spi_reg *)sni->spi.base;

    spi->delaychain = (spi->delaychain & ~(0x3F << 0)) | (delay << 0);
    xspi_delay();
}
  743. #if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
  744. extern int nor_test_delaychain(const struct device *dev);
  745. #endif
  746. #if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
  747. extern void nor_dual_quad_read_mode_try(struct spinor_info *sni);
  748. #endif
  749. #ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
  750. #include "flash_delaytran_table.c"
  751. //static const struct nor_delaychain_tbl *g_p_chipid_tbl;
  752. static const struct id_nor_delaychain_tbl *g_chipid_tbl;
  753. static void nor0_delaytran_init(uint32_t chip_id)
  754. {
  755. uint8_t i;
  756. g_chipid_tbl = &chipid_dl_tbl[0];
  757. for (i = 1; i < ARRAY_SIZE(chipid_dl_tbl); i++) {
  758. if (chipid_dl_tbl[i].chip_id == chip_id) {
  759. g_chipid_tbl = &chipid_dl_tbl[i];
  760. printk("nor find dl tbl=%d\n", i);
  761. break;
  762. }
  763. }
  764. }
  765. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  766. static const struct nor_delaychain_tbl *g_cs1_p_chipid_tbl = NULL;
  767. static void nor1_delaytran_init(uint32_t chip_id)
  768. {
  769. uint8_t i;
  770. g_cs1_p_chipid_tbl = chipid_dl_tbl[0].tbl;
  771. for (i = 1; i < ARRAY_SIZE(chipid_dl_tbl); i++) {
  772. if (chipid_dl_tbl[i].chip_id == chip_id) {
  773. g_cs1_p_chipid_tbl = chipid_dl_tbl[i].tbl;
  774. printk("nor cs1 find dl tbl=%d\n", i);
  775. break;
  776. }
  777. }
  778. }
  779. #endif
/*
 * Apply the delay chain matching the given VDD voltage (mV), using the table
 * selected at boot by nor0_delaytran_init().  Also mirrors the chosen value
 * into sni->spi.delay_chain so XIP code uses the same setting.
 */
static __ramfunc void nor_set_delaychain_by_vdd(struct spinor_info *sni, uint16_t vdd)
{
    uint8_t i;
    const struct nor_delaychain_tbl *ptbl;

    ptbl = g_chipid_tbl->tbl;
    for (i = 0; i < CHIP_ID_TBL_NUM; i++) {
        if (ptbl[i].vdd_volt == vdd) {
            xspi_setup_delaychain(sni, ptbl[i].delay);
            sni->spi.delay_chain = ptbl[i].delay; /* same as xip */
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
            nor_cs0_delaytran = sni->spi.delay_chain;
            if (g_cs1_p_chipid_tbl == NULL) {
                /* CS1 table not initialized yet: reuse the CS0 value */
                nor_cs1_delaytran = nor_cs0_delaytran;
            } else {
                /* assumes CS0 and CS1 tables share row ordering by
                 * voltage (same index i) — TODO confirm in
                 * flash_delaytran_table.c */
                nor_cs1_delaytran = g_cs1_p_chipid_tbl[i].delay;
            }
#endif
            break;
        }
    }
}
/*
 * Choose the SPI0 clock for the given VDD voltage (mV): a safe low rate at
 * low voltage, the chip's table maximum otherwise.  In DTR mode the clock
 * fed to the divider is doubled.
 *
 * NOTE(review): the DTR branch uses `<= 1000` while the non-DTR branch uses
 * `< 1000`, so 1000mV is treated as "low voltage" only in DTR mode —
 * confirm whether this asymmetry is intentional.
 */
static __ramfunc void spi0_set_clk_by_vdd(uint16_t vdd_volt)
{
#ifdef CONFIG_SPI0_NOR_DTR_MODE
    if (vdd_volt <= 1000) {
        spi0_dtr_set_clk(MHZ(48) * 2);
    } else {
        spi0_dtr_set_clk(MHZ(g_chipid_tbl->max_clk) * 2);
    }
#else
    if (vdd_volt < 1000) {
        clk_set_rate(CLOCK_ID_SPI0, MHZ(64));
    } else {
        clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
    }
#endif
}
/*
 * DVFS transition hook: keep the SPI0 clock and nor delay chain legal across
 * VDD changes.  Ordering matters: when VDD drops, the bus is slowed down
 * BEFORE the voltage falls (PRE_CHANGE); when VDD rises, it is sped up only
 * AFTER the new voltage is stable (POST_CHANGE).  Runs with IRQs locked
 * because it reconfigures the bus the XIP code executes from.
 */
__dvfs_notifier_func static void nor_dvfs_notify(void *user_data, struct dvfs_freqs *dvfs_freq)
{
    struct spinor_info *sni = (struct spinor_info *)user_data;
    struct dvfs_level *old_dvfs_level, *new_dvfs_level;
    uint32_t key;

    if (!dvfs_freq) {
        printk("dvfs notify invalid param");
        return;
    }
    if (dvfs_freq->old_level == dvfs_freq->new_level)
        return;
    /* NOTE(review): results are not NULL-checked — confirm
     * dvfs_get_info_by_level_id() cannot fail for notified levels. */
    old_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->old_level);
    new_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->new_level);
    if (old_dvfs_level->vdd_volt == new_dvfs_level->vdd_volt) {
        return;
    }
    key = irq_lock();
    if (old_dvfs_level->vdd_volt > new_dvfs_level->vdd_volt) {
        /* vdd voltage decrease: slow the clock before the drop */
        if (dvfs_freq->state == DVFS_EVENT_PRE_CHANGE) {
            spi0_set_clk_by_vdd(new_dvfs_level->vdd_volt);
            nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
            printk("nor delaychain update by vdd:%d => %d\n",
                   old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
        }
    } else {
        /* vdd voltage increase: raise the clock only after the rise */
        if (dvfs_freq->state == DVFS_EVENT_POST_CHANGE) {
            nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
            spi0_set_clk_by_vdd(new_dvfs_level->vdd_volt);
            printk("nor delaychain update by vdd:%d => %d\n",
                   old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
        }
    }
    irq_unlock(key);
}
/* Forward declaration: driver state for the system XIP nor (defined below). */
static struct spinor_info spi_flash_acts_data;
/* DVFS notifier wiring nor_dvfs_notify() to the XIP nor instance.
 * (Name keeps the historical "dvsf" transposition; it is referenced at
 * registration time in spi_flash_acts_init.) */
static struct dvfs_notifier __dvfs_notifier_data nor_dvsf_notifier = {
    .dvfs_notify_func_t = nor_dvfs_notify,
    .user_data = &spi_flash_acts_data,
};
  858. #endif /* CONFIG_ACTS_DVFS_DYNAMIC_LEVEL */
  859. void spinor_test_uid_securty(const struct device *dev);
  860. __ramfunc static int spinor_wait_ready(struct spinor_info *sni)
  861. {
  862. unsigned char status;
  863. while (1) {
  864. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  865. if (!(status & 0x1))
  866. break;
  867. }
  868. return 0;
  869. }
  870. __ramfunc static int spi_flash_cfg_read(struct spinor_info *sni, uint32_t addr, uint8_t *cfg)
  871. {
  872. u8_t addr_len;
  873. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  874. if (cid > 24){
  875. addr_len = 4;
  876. }else{
  877. addr_len = 3;
  878. }
  879. return p_spinor_api->transfer(sni, SPINOR_CMD_RD_VOL_CFG, addr, addr_len, cfg, 1, 1, 0);
  880. }
  881. __ramfunc static int spi_flash_cfg_write(struct spinor_info *sni, uint32_t addr, uint8_t *cfg)
  882. {
  883. int ret;
  884. u8_t addr_len;
  885. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  886. if (cid > 24){
  887. addr_len = 4;
  888. }else{
  889. addr_len = 3;
  890. }
  891. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  892. ret = p_spinor_api->transfer(sni, SPINOR_CMD_WR_VOL_CFG, addr, addr_len, cfg, 1, 0, SPIMEM_TFLAG_WRITE_DATA);
  893. spinor_wait_ready(sni);
  894. return ret;
  895. }
  896. #define GD25B512M_CHIP_ID 0x1a47c8
  897. __ramfunc void spi_flash_init_by_chpid(struct spinor_info *sni)
  898. {
  899. uint8_t cfg = 0;
  900. if(sni->chipid == GD25B512M_CHIP_ID){
  901. spi_flash_cfg_read(sni, 6, &cfg);
  902. if(cfg &0x01){
  903. cfg &= 0xfe;
  904. spi_flash_cfg_write(sni, 6, &cfg); // xip enableed
  905. printk("eable xip\n");
  906. }
  907. }
  908. }
/*
 * Probe and configure the system XIP spinor on SPI0.
 *
 * Runs with IRQs locked for almost the whole routine because it retunes the
 * very bus the XIP code executes from (clock, delay chain, bus width,
 * address mode).  Sequence: read chip id at a safe low clock -> per-chip
 * quirks -> delay chain -> bus width / QE -> final clock -> read the chip
 * id again to verify the tuning.
 */
__ramfunc int spi_flash_acts_init(const struct device *dev)
{
    struct spinor_info *sni = DEV_DATA(dev);
    uint32_t key;
    uint8_t status, status2, status3;
    unsigned char cid;

    sni->spi.prepare_hook = spi_flash_acts_prepare;
    key = irq_lock();
#ifndef CONFIG_SPI0_NOR_DTR_MODE /* dtr mode does not set clk here */
    /* half the target frequency until the delay chain is tuned */
    clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ/2)); /* low clk for chipid read */
#endif
    sni->chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
    /* cid byte encodes capacity: size = 2^cid bytes */
    cid = (sni->chipid & 0xff0000)>>16;
    printk("read spi nor chipid:0x%x, cid=%d\n", sni->chipid, cid);
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    nor0_delaytran_init(sni->chipid);
    sni->spi.freq_khz = KHZ(g_chipid_tbl->max_clk);
#endif
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
    status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
    printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
#ifdef CONFIG_NOR_SUSPEND_RESUME
    /* a suspended erase/program may survive reset; finish it first */
    if(status2 & (NOR_STATUS2_SUS1|NOR_STATUS2_SUS2))
        spinor_resume_finished(sni);
#endif
    if(cid > 24){ /* capacity = 2^cid (byte): > 16MB needs 4-byte addressing */
        p_spinor_api->set_addr_mode(sni, 4); /* set 4byte mode */
        sys_set_bit(sni->spi.base , 16); /* spi ctl bit16: 4byte mode */
    }
    spi_flash_init_by_chpid(sni);
    /* configure delay chain */
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    /* 1200mV is the boot-time VDD; the DVFS notifier retunes later */
    nor_set_delaychain_by_vdd(sni, 1200);
#else
    xspi_setup_delaychain(sni, sni->spi.delay_chain);
#endif
#if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
    nor_dual_quad_read_mode_try(sni);
    printk("bus width : %d, and cache read use ", sni->spi.bus_width);
#else
    if(sni->spi.bus_width == 4) {
        printk("nor is 4 line mode\n");
#ifndef CONFIG_SPI_4X_READ
        sni->spi.flag |= SPI_FLAG_SPI_4XIO;
#endif
        /* QE bit must be set before switching the controller to 4x */
        xspi_nor_enable_status_qe(sni);
        /* enable 4x mode */
        xspi_setup_bus_width(sni, 4);
    } else if(sni->spi.bus_width == 2) {
        printk("nor is 2 line mode\n");
        /* enable 2x mode */
        xspi_setup_bus_width(sni, 2);
    } else {
        sni->spi.bus_width = 1;
        printk("nor is 1 line mode\n");
        /* enable 1x mode */
        xspi_setup_bus_width(sni, 1);
    }
#endif
#ifdef CONFIG_SPI0_NOR_DTR_MODE
    /* DTR entry must execute from cached XIP code */
    if(sni->spi.bus_width == 4){
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
        sni->chipid = spi0_cache_enter_dtr_mode(sni, sni->spi.freq_khz/1000);
#else
        sni->chipid = spi0_cache_enter_dtr_mode(sni, CONFIG_SPI_FLASH_FREQ_MHZ);
#endif
        printk("spinor dtr mode , chipid:0x%x, clkreg=0x%x\n", sni->chipid, sys_read32(CMU_SPI0CLK));
    }
#else
    /* setup SPI clock rate */
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
#else
    clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
#endif
#endif
    /* check delay chain workable: id must read back the same at full speed */
    sni->chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
    printk("read again spi nor chipid:0x%x\n", sni->chipid);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    /* second nor on the same SPI0 bus, selected by a GPIO chip-select */
    unsigned int chip_id;
    spi_gpio_cs_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
    if (!spi_gpio_cs_dev) {
        printk("failed to get gpio:%d device", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
        irq_unlock(key);
        return -1;
    }
    clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ/2)); /* low clk for cs1 probing */
    nor_cs0_size = 1 << ((sni->chipid >> 16)&0xff);
    gpio_pin_configure(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, GPIO_OUTPUT_HIGH);
    gpio_pin_set(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, 1);
    printk("use GPIO:%d as spi cs pin\n", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
    spi_flash1_cs_select(sni, 1);
    p_spinor_api->write_cmd(sni, 0xAB); /* exit deep power down */
    soc_udelay(100);
    p_spinor_api->write_cmd(sni, 0xff); /* reset nor */
    chip_id = sni->chipid; /* back up nor0 chipid */
    sni->chipid = p_spinor_api->read_chipid(sni);
    nor_cs1_size = 1 << ((sni->chipid >> 16)&0xff);
    printk("cs0 nor size=0x%x, cs1 nor chipid:0x%x, size=0x%x\n", nor_cs0_size, sni->chipid, nor_cs1_size);
    if(nor_cs1_size > 0x1000000)
        p_spinor_api->set_addr_mode(sni, 4); /* set 4byte mode */
    xspi_nor_enable_status_qe(sni);
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
    status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
    printk("cs1 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
    spi_flash1_cs_select(sni, 0);
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    nor1_delaytran_init(sni->chipid); /* init cs1 delaytran table */
    nor_set_delaychain_by_vdd(sni, 1200); /* init default delaytran by vdd */
#else
    nor_cs0_delaytran = sni->spi.delay_chain;
    nor_cs1_delaytran = nor_cs0_delaytran;
#endif
    sni->chipid = chip_id; /* restore nor0 chipid */
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
#else
    clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
#endif
#endif /* defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1) */
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
    status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
    printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
#ifdef CONFIG_SPI0_NOR_DTR_MODE
    if(sni->spi.bus_width == 4){
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
        sni->chipid = spi0_cache_enter_dtr_mode(sni, g_chipid_tbl->max_clk);
#else
        sni->chipid = spi0_cache_enter_dtr_mode(sni, CONFIG_SPI_FLASH_FREQ_MHZ);
#endif
        printk("spinor dtr mode , chipid:0x%x\n", sni->chipid);
    }
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
    printk("qpi enable\n");
    xspi_nor_qpi_init(sni);
    printk("qpi enable ok\n");
#endif
#if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
    nor_test_delaychain(dev);
#endif
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
    dvfs_register_notifier(&nor_dvsf_notifier);
#endif
#ifdef CONFIG_SPI_XIP_READ
    spi_flash_xip_init();
#endif
#ifdef CONFIG_NOR_SECURIYT_SUSPPORT
    spinor_test_uid_securty(dev);
#endif
    irq_unlock(key);
    flash_write_protection_set(dev, true);
    return 0;
}
  1075. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  1076. static void spi_flash_acts_pages_layout(
  1077. const struct device *dev,
  1078. const struct flash_pages_layout **layout,
  1079. size_t *layout_size)
  1080. {
  1081. *layout = &(DEV_CFG(dev)->pages_layout);
  1082. *layout_size = 1;
  1083. }
  1084. #endif /* IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT) */
  1085. #if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
  1086. extern int nor_write_protection(const struct device *dev, bool enable);
  1087. #endif
  1088. __ramfunc int spi_flash_acts_write_protection(const struct device *dev, bool enable)
  1089. {
  1090. #if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
  1091. nor_write_protection(dev, enable);
  1092. #endif
  1093. return 0;
  1094. }
/* Parameters reported through the flash API.
 * NOTE(review): write_block_size is 0x1000 (4KB), which matches the erase
 * sector size used throughout this file; typical nor page-program
 * granularity is smaller — confirm this value is intentional. */
static const struct flash_parameters flash_acts_parameters = {
    .write_block_size = 0x1000,
    .erase_value = 0xff,
};
  1099. static const struct flash_parameters *
  1100. spi_flash_get_parameters(const struct device *dev)
  1101. {
  1102. ARG_UNUSED(dev);
  1103. return &flash_acts_parameters;
  1104. }
  1105. #ifdef CONFIG_PM_DEVICE
/*
 * PM hook for the SPI0 nor.
 *
 * LATE_RESUME/EARLY_SUSPEND program SPICACHE_CTL bits [6:5] (0x1 on resume,
 * 0x2 on suspend).  NOTE(review): the field's exact meaning is not visible
 * in this file — presumably a cache operating mode; confirm against the SoC
 * reference manual.
 *
 * With the GPIO second chip-select, RESUME/SUSPEND additionally bring the
 * CS1 part out of / into deep power-down.
 */
int spi_flash_pm_control(const struct device *device, enum pm_device_action action)
{
    if(action == PM_DEVICE_ACTION_LATE_RESUME){
        sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x1 << 5) , SPICACHE_CTL);
    }else if(action == PM_DEVICE_ACTION_EARLY_SUSPEND){
        sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x2 << 5) , SPICACHE_CTL);
    }
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
    struct spinor_info *sni = DEV_DATA(device);
    int i;
    unsigned int chipid;
    if(action == PM_DEVICE_ACTION_RESUME){
        printk("spi0 cs2 resume ...\n");
        spi_flash1_cs_select(sni, 1);
        /* retry up to 3 times: the part may need extra time to wake and
         * answer with the expected chip id */
        for(i = 0; i < 3; i++){
            p_spinor_api->write_cmd(sni, 0xAB); /* exit deep power down */
            soc_udelay(40);
            chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
            if(sni->chipid != chipid){
                printk("%d cs2 resume 0x%x != 0x%x fail \n", i, chipid, sni->chipid);
                soc_udelay(1000);
            }else{
                break;
            }
        }
        spi_flash1_cs_select(sni, 0);
    }else if(action == PM_DEVICE_ACTION_SUSPEND){
        printk("spi0 cs2 suspend ...\n");
        spi_flash1_cs_select(sni, 1);
        p_spinor_api->write_cmd(sni, 0xB9); /* enter deep power down */
        spi_flash1_cs_select(sni, 0);
    }
#endif
    return 0;
}
  1143. #else
  1144. #define spi_flash_pm_control NULL
  1145. #endif
  1146. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  1147. K_MUTEX_DEFINE(spinor_cs2_w_mutex);
  1148. static void spi_flash_cs2_lock(void)
  1149. {
  1150. if(!k_is_in_isr()){
  1151. k_mutex_lock(&spinor_cs2_w_mutex, K_FOREVER);
  1152. }
  1153. }
  1154. static void spi_flash_cs2_unlock(void)
  1155. {
  1156. if(!k_is_in_isr()){
  1157. k_mutex_unlock(&spinor_cs2_w_mutex);
  1158. }
  1159. }
/*
 * Read across the two chips: addresses below nor_cs0_size go to CS0, the
 * rest to CS1.  A request straddling the boundary is split in two.
 * Only CS1 accesses take the cs2 mutex; CS0 reads are lock-free.
 *
 * NOTE(review): the return value of the first (CS0) half of a split read is
 * discarded — an error there is silently lost.  Also, the pointer is bumped
 * through `unsigned int`, which assumes a 32-bit target.
 */
int spi_flash_acts_2cs_read(const struct device *dev, uint64_t offset, void *data, uint64_t len)
{
    size_t tlen;
    int ret;

    if(offset < nor_cs0_size){
        if(offset+len > nor_cs0_size){
            /* split: read the CS0 tail, then fall through to CS1 */
            tlen = nor_cs0_size - offset;
            spi_flash_acts_read(dev, offset, data, tlen);
            offset = nor_cs0_size;
            data = (void *)((unsigned int )data + tlen);
            len -= tlen;
        }
    }
    if(offset < nor_cs0_size){ /* read cs0: no lock needed */
        ret = spi_flash_acts_read(dev, offset, data, len);
    }else{
        spi_flash_cs2_lock();
        ret = spi_flash_acts_read(dev, offset, data, len);
        spi_flash_cs2_unlock();
    }
    return ret;
}
/*
 * Write across the two chips, splitting at the CS0/CS1 boundary.  The whole
 * operation holds the cs2 mutex.  Refuses to write when the global
 * write-disable check spi_flash_not_wr() trips.
 *
 * NOTE(review): the return value of the first (CS0) half of a split write
 * is discarded — an error there is silently lost.
 */
int spi_flash_acts_2cs_write(const struct device *dev, off_t offset, const void *data, size_t len)
{
    size_t tlen;
    int ret;

    if (spi_flash_not_wr())
        return -1;
    spi_flash_cs2_lock();
    if(offset < nor_cs0_size){
        if(offset+len > nor_cs0_size){
            /* split: write the CS0 tail first */
            tlen = nor_cs0_size - offset;
            spi_flash_acts_write(dev, offset, data, tlen);
            offset = nor_cs0_size;
            data = (const void *)((unsigned int )data + tlen);
            len -= tlen;
        }
    }
    ret = spi_flash_acts_write(dev, offset, data, len);
    spi_flash_cs2_unlock();
    return ret;
}
/*
 * Erase across the two chips, splitting at the CS0/CS1 boundary, under the
 * cs2 mutex.  Refused when spi_flash_not_wr() trips.
 *
 * NOTE(review): the return value of the first (CS0) half of a split erase
 * is discarded — an error there is silently lost.
 */
int spi_flash_acts_2cs_erase(const struct device *dev, off_t offset, size_t size)
{
    size_t tlen;
    int ret;

    if (spi_flash_not_wr())
        return -1;
    spi_flash_cs2_lock();
    if(offset < nor_cs0_size){
        if(offset+size > nor_cs0_size){
            /* split: erase the CS0 tail first */
            tlen = nor_cs0_size - offset;
            spi_flash_acts_erase(dev, offset, tlen);
            offset = nor_cs0_size;
            size -= tlen;
        }
    }
    ret = spi_flash_acts_erase(dev, offset, size);
    spi_flash_cs2_unlock();
    return ret;
}
/* flash_driver_api for the dual-chip-select build: read/write/erase route
 * through the 2cs wrappers that split requests at the CS0/CS1 boundary. */
static struct flash_driver_api spi_flash_nor_api = {
    .read = spi_flash_acts_2cs_read,
    .write = spi_flash_acts_2cs_write,
    .erase = spi_flash_acts_2cs_erase,
    .write_protection = spi_flash_acts_write_protection,
    .get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
    .page_layout = spi_flash_acts_pages_layout,
#endif
};
  1231. #else
/* flash_driver_api for the single-chip build: plain read/write/erase. */
static struct flash_driver_api spi_flash_nor_api = {
    .read = spi_flash_acts_read,
    .write = spi_flash_acts_write,
    .erase = spi_flash_acts_erase,
    .write_protection = spi_flash_acts_write_protection,
    .get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
    .page_layout = spi_flash_acts_pages_layout,
#endif
};
  1242. #endif
  1243. /* system XIP spinor */
/* Runtime state of the system XIP nor on SPI0; placed in S2-sleep-retained
 * memory (__act_s2_sleep_data) so it stays valid across sleep. */
static struct spinor_info __act_s2_sleep_data spi_flash_acts_data = {
    .spi = {
        .base = SPI0_REG_BASE,
        .bus_width = CONFIG_SPI_FLASH_BUS_WIDTH,
        .delay_chain = CONFIG_SPI_FLASH_DELAY_CHAIN,
#if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
        /* register window of the DMA channel reserved for nor transfers */
        .dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
#endif
        .flag = 0,
    },
    .flag = 0,
};
  1256. __sleepfunc void sys_norflash_power_ctrl(uint32_t is_powerdown)
  1257. {
  1258. struct spinor_info *sni = &spi_flash_acts_data;
  1259. if (is_powerdown){
  1260. p_spinor_api->write_cmd(sni, 0xB9); // enter deep power down
  1261. soc_udelay(5); // max 3us
  1262. } else {
  1263. //sni->spi.flag |= SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
  1264. p_spinor_api->write_cmd(sni, 0xAB);
  1265. //sni->spi.flag &= ~SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
  1266. soc_udelay(40); // max 30us
  1267. }
  1268. }
  1269. #if 0
  1270. void spinor_set(int bus_width, int use_dma, int nxio)
  1271. {
  1272. spi_flash_acts_data.spi.bus_width = bus_width;
  1273. if(use_dma)
  1274. spi_flash_acts_data.spi.dma_base = (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100));
  1275. else
  1276. spi_flash_acts_data.spi.dma_base = 0;
  1277. if(nxio){
  1278. spi_flash_acts_data.spi.flag |= SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO;
  1279. }else{
  1280. spi_flash_acts_data.spi.flag &= ~(SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO);
  1281. }
  1282. }
  1283. #endif
/* Static config of the system nor: total size from Kconfig, uniform 4KB
 * pages/sectors. */
static const struct spi_flash_acts_config spi_acts_config = {
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
    .pages_layout = {
        .pages_count = CONFIG_SPI_FLASH_CHIP_SIZE/0x1000,
        .pages_size = 0x1000,
    },
#endif
    .chip_size = CONFIG_SPI_FLASH_CHIP_SIZE,
    .page_size = 0x1000,
};
#if IS_ENABLED(CONFIG_SPI_FLASH_0)
/* System XIP nor device: initialized at PRE_KERNEL_1 so flash is usable
 * before other drivers start. */
DEVICE_DEFINE(spi_flash_acts, CONFIG_SPI_FLASH_NAME, &spi_flash_acts_init, spi_flash_pm_control,
    &spi_flash_acts_data, &spi_acts_config, PRE_KERNEL_1,
    CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &spi_flash_nor_api);
#endif
  1299. #if IS_ENABLED(CONFIG_SPI_FLASH_2)
  1300. static K_MUTEX_DEFINE(flash_2_mutex);
  1301. static int spi_flash_2_acts_read(const struct device *dev, uint64_t offset, void *data, uint64_t len)
  1302. {
  1303. struct spinor_info *sni = DEV_DATA(dev);
  1304. int ret = 0;
  1305. size_t tmplen;
  1306. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1307. tmplen = len;
  1308. while(tmplen > 0) {
  1309. if(tmplen < 0x8000)
  1310. len = tmplen;
  1311. else
  1312. len = 0x8000;
  1313. ret = p_spinor_api->read(sni, offset, data, len);
  1314. offset += len;
  1315. data = (void *)((char *)data + len);
  1316. tmplen -= len;
  1317. }
  1318. k_mutex_unlock(&flash_2_mutex);
  1319. return ret;
  1320. }
  1321. static int spi_flash_2_acts_write(const struct device *dev, uint64_t offset, const void *data, uint64_t len)
  1322. {
  1323. struct spinor_info *sni = DEV_DATA(dev);
  1324. int ret;
  1325. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1326. ret = p_spinor_api->write(sni, offset, data, len);
  1327. k_mutex_unlock(&flash_2_mutex);
  1328. return ret ;
  1329. }
  1330. static int spi_flash_2_acts_erase(const struct device *dev, uint64_t offset, uint64_t size)
  1331. {
  1332. struct spinor_info *sni = DEV_DATA(dev);
  1333. int ret;
  1334. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1335. ret = p_spinor_api->erase(sni, offset, size);
  1336. k_mutex_unlock(&flash_2_mutex);
  1337. return ret ;
  1338. }
/*
 * Power the secondary (SPI3) nor on or off — either by toggling a dedicated
 * power-rail GPIO (polarity from Kconfig) or, without one, by issuing the
 * deep power-down enter/release commands.
 *
 * NOTE(review): the function name keeps the historical "pwoer" typo;
 * renaming would touch every caller, so it is documented instead of fixed.
 */
static int spi_flash_2_pwoer(struct spinor_info *sni, bool on)
{
#if IS_ENABLED(CONFIG_SPI_FLASH_2_USE_GPIO_POWER)
    int ret;
    /* active level of "power on", from board config */
    int gpio_value = CONFIG_SPI_FLASH_2_GPIO_POWER_LEVEL;
    const struct device *power_gpio_dev;
    uint8_t power_gpio = CONFIG_SPI_FLASH_2_POWER_GPIO % 32;

    power_gpio_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
    if (!power_gpio_dev) {
        LOG_ERR("Failed to bind nor power GPIO(%d:%s)", power_gpio, CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
        return -1;
    }
    ret = gpio_pin_configure(power_gpio_dev, power_gpio, GPIO_OUTPUT);
    if (ret) {
        LOG_ERR("Failed to config output GPIO:%d", power_gpio);
        return ret;
    }
    if (on) {
        /* power on nor */
        gpio_pin_set(power_gpio_dev, power_gpio, gpio_value);
    } else {
        /* power off nor */
        gpio_pin_set(power_gpio_dev, power_gpio, !gpio_value);
    }
#else
    /* no power rail under software control: use deep power-down instead */
    if (on) {
        p_spinor_api->write_cmd(sni, 0xAB); /* exit deep power down */
    } else {
        p_spinor_api->write_cmd(sni, 0xB9); /* enter deep power down */
    }
#endif
    return 0;
}
  1372. #ifdef CONFIG_PM_DEVICE
  1373. int spi_flash_2_pm_control(const struct device *device, enum pm_device_action action)
  1374. {
  1375. struct spinor_info *sni = DEV_DATA(device);
  1376. if(action == PM_DEVICE_ACTION_RESUME){
  1377. LOG_INF("spi2 nor resume ...\n");
  1378. spi_flash_2_pwoer(sni, true);
  1379. }else if(action == PM_DEVICE_ACTION_SUSPEND){
  1380. LOG_INF("spi2 nor suspend ...\n");
  1381. spi_flash_2_pwoer(sni, false);
  1382. }
  1383. return 0;
  1384. }
  1385. #else
  1386. #define spi_flash_2_pm_control NULL
  1387. #endif
/*
 * Probe the secondary data nor on SPI3.  Unlike the SPI0 part, this one is
 * not executed from (no XIP), so no IRQ locking or __ramfunc placement is
 * needed: enable and reset the controller, power the chip, detect id and
 * capacity, and set the QE bit when 4-line mode is configured.
 */
static int spi_flash_2_acts_init(const struct device *dev)
{
    struct spinor_info *sni = DEV_DATA(dev);
    uint8_t status, status2, status3;
    unsigned char cid;

    printk("spi3 flash init\n");
    /* enable spi3 controller clock */
    acts_clock_peripheral_enable(CLOCK_ID_SPI3);
    /* reset spi3 controller */
    acts_reset_peripheral(RESET_ID_SPI3);
    /* setup SPI3 clock rate */
    clk_set_rate(CLOCK_ID_SPI3, MHZ(CONFIG_SPI_FLASH_2_FREQ_MHZ));
    spi_flash_2_pwoer(sni, true);
    sni->chipid = p_spinor_api->read_chipid(sni);
    cid = (sni->chipid & 0xff0000)>>16;
    printk("read spi3 nor chipid:0x%x, cid=%d\n", sni->chipid, cid);
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
    status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
    printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
    if(cid > 24){ /* capacity = 2^cid (byte): > 16MB needs 4-byte addressing */
        p_spinor_api->set_addr_mode(sni, 4); /* set 4byte mode */
    }
    if(sni->spi.bus_width == 4) {
        printk("data nor is 4 line mode\n");
        sni->spi.flag |= SPI_FLAG_SPI_4XIO;
        /* check QE bit */
        if (!(status2 & 0x2)) {
            /* set QE bit to disable HOLD/WP pin function, for WinBond */
            status2 |= 0x2;
            p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
                (u8_t *)&status2, 1);
        }
    } else if(sni->spi.bus_width == 2) {
        printk("data nor is 2 line mode\n");
    } else {
        sni->spi.bus_width = 1;
        printk("data nor is 1 line mode\n");
    }
    /* check delay chain workable: id must read back unchanged */
    sni->chipid = p_spinor_api->read_chipid(sni);
    printk("read again spi3 nor chipid:0x%x\n", sni->chipid);
    status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
    status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
    status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
    printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
#if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
    //nor_test_delaychain(dev);
#endif
    return 0;
}
/* flash_driver_api of the secondary (SPI3) nor; no write_protection hook. */
static struct flash_driver_api spi_flash_2_nor_api = {
    .read = spi_flash_2_acts_read,
    .write = spi_flash_2_acts_write,
    .erase = spi_flash_2_acts_erase,
    .get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
    .page_layout = spi_flash_acts_pages_layout,
#endif
};
/* Runtime state of the secondary nor on SPI3.  SPI_FLAG_NO_IRQ_LOCK: this
 * bus is not used for XIP, so transfers need not disable interrupts. */
static struct spinor_info spi_flash_2_acts_data = {
    .spi = {
        .base = SPI3_REG_BASE,
        .bus_width = CONFIG_SPI_FLASH_2_BUS_WIDTH,
        .delay_chain = CONFIG_SPI_FLASH_2_DELAY_CHAIN,
#if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
        /* register window of the DMA channel reserved for nor transfers */
        .dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
#endif
        .flag = SPI_FLAG_NO_IRQ_LOCK,
    },
    .flag = 0,
};
  1460. #if 0
  1461. void spinor3_set(int bus_width, int use_dma, int nxio)
  1462. {
  1463. spi_flash_2_acts_data.spi.bus_width = bus_width;
  1464. if(use_dma)
  1465. spi_flash_2_acts_data.spi.dma_base = (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100));
  1466. else
  1467. spi_flash_2_acts_data.spi.dma_base = 0;
  1468. if(nxio){
  1469. spi_flash_2_acts_data.spi.flag |= SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO;
  1470. }else{
  1471. spi_flash_2_acts_data.spi.flag &= ~(SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO);
  1472. }
  1473. }
  1474. void spinor03_set_dt(int spi, int delaytran)
  1475. {
  1476. if(spi == 0)
  1477. spi_flash_acts_data.spi.delay_chain = delaytran;
  1478. else
  1479. spi_flash_2_acts_data.spi.delay_chain = delaytran;
  1480. }
  1481. #endif
/* Static config of the secondary nor: size from Kconfig, uniform 4KB pages. */
static const struct spi_flash_acts_config spi_flash_2_acts_config = {
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
    .pages_layout = {
        .pages_count = CONFIG_SPI_FLASH_2_CHIP_SIZE/0x1000,
        .pages_size = 0x1000,
    },
#endif
    .chip_size = CONFIG_SPI_FLASH_2_CHIP_SIZE,
    .page_size = 0x1000,
};
/* Secondary nor device: POST_KERNEL since it is not needed for XIP boot. */
DEVICE_DEFINE(spi_flash_2_acts, CONFIG_SPI_FLASH_2_NAME, &spi_flash_2_acts_init, spi_flash_2_pm_control,
    &spi_flash_2_acts_data, &spi_flash_2_acts_config, POST_KERNEL,
    CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, &spi_flash_2_nor_api);
  1495. #endif
  1496. #ifdef CONFIG_NOR_SECURIYT_SUSPPORT
  1497. #define SPINOR_CMD_SECURITY_ERASE 0x44 /* erase security registers cmd*/
  1498. #define SPINOR_CMD_SECURITY_PROGRAM 0x42 /* program security registers cmd*/
  1499. #define SPINOR_CMD_SECURITY_READ 0x48 /* read security registers cmd*/
  1500. #define SPINOR_CMD_UID_READ 0x4B /* Read Unique ID cmd*/
  1501. __ramfunc static int spinor_erase_security(struct spinor_info *sni, unsigned int addr)
  1502. {
  1503. u32_t key;
  1504. u8_t addr_mode;
  1505. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1506. if (cid > 24)
  1507. addr_mode = 4;
  1508. else
  1509. addr_mode = 3;
  1510. key = irq_lock(); //ota diff upgrade, must be irq lock
  1511. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  1512. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_ERASE, addr, addr_mode, 0, 0, 0, SPIMEM_TFLAG_WRITE_DATA);
  1513. spinor_wait_ready(sni);
  1514. irq_unlock(key);
  1515. return 0;
  1516. }
  1517. __ramfunc static int spinor_write_security(struct spinor_info *sni, unsigned int addr, const void *data, int len)
  1518. {
  1519. u32_t key;
  1520. u8_t addr_mode;
  1521. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1522. if (cid > 24)
  1523. addr_mode = 4;
  1524. else
  1525. addr_mode = 3;
  1526. key = irq_lock();
  1527. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  1528. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_PROGRAM, addr, addr_mode, (u8_t *)data, len, 0, SPIMEM_TFLAG_WRITE_DATA);
  1529. spinor_wait_ready(sni);
  1530. irq_unlock(key);
  1531. return 0;
  1532. }
  1533. __ramfunc static int spinor_read_security(struct spinor_info *sni, unsigned int addr, void *data, int len)
  1534. {
  1535. u32_t key;
  1536. u8_t addr_mode;
  1537. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1538. if (cid > 24)
  1539. addr_mode = 4;
  1540. else
  1541. addr_mode = 3;
  1542. key = irq_lock();
  1543. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_READ, addr, addr_mode, (u8_t *)data, len, 1, 0);
  1544. irq_unlock(key);
  1545. return 0;
  1546. }
  1547. __ramfunc static int spinor_read_uid(struct spinor_info *sni, void *data, int len)
  1548. {
  1549. u32_t key;
  1550. u8_t addr_mode;
  1551. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1552. if (cid > 24)
  1553. addr_mode = 4;
  1554. else
  1555. addr_mode = 3;
  1556. key = irq_lock();
  1557. p_spinor_api->transfer(sni, SPINOR_CMD_UID_READ, 0, addr_mode, (u8_t *)data, len, 1, 0);
  1558. irq_unlock(key);
  1559. return 0;
  1560. }
  1561. #define NOR_SE_PAGE_SIZE 256
  1562. #define NOR_SE_PAGE_MASK (NOR_SE_PAGE_SIZE-1)
  1563. #define NOR_SE_MAX_SIZE_EACH_REGN 1024 /**/
  1564. /*security_regn 0-3*/
  1565. int spi_flash_security_erase(const struct device *dev, unsigned int security_regn)
  1566. {
  1567. struct spinor_info *sni = DEV_DATA(dev);
  1568. return spinor_erase_security(sni, security_regn<<12);
  1569. }
  1570. int spi_flash_security_write(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  1571. {
  1572. unsigned int wlen, unlen;
  1573. struct spinor_info *sni = DEV_DATA(dev);
  1574. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  1575. return -1;
  1576. unlen = offset & NOR_SE_PAGE_MASK;
  1577. while(len){
  1578. if(unlen){
  1579. wlen = NOR_SE_PAGE_SIZE - unlen;
  1580. if(wlen > len)
  1581. wlen = len;
  1582. unlen = 0;
  1583. }else{
  1584. if(len < NOR_SE_PAGE_SIZE)
  1585. wlen = len;
  1586. else
  1587. wlen = NOR_SE_PAGE_SIZE;
  1588. }
  1589. spinor_write_security(sni, (security_regn<<12)|offset, data, wlen);
  1590. data = (unsigned char *)data + wlen;
  1591. len -= wlen;
  1592. offset += wlen;
  1593. }
  1594. return 0;
  1595. }
  1596. int spi_flash_security_read(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  1597. {
  1598. struct spinor_info *sni = DEV_DATA(dev);
  1599. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  1600. return -1;
  1601. return spinor_read_security(sni, (security_regn<<12)|offset, data, len);
  1602. }
  1603. int spi_flash_uid_read(const struct device *dev, void *uid, unsigned int len)
  1604. {
  1605. struct spinor_info *sni = DEV_DATA(dev);
  1606. return spinor_read_uid(sni, uid, len);
  1607. }
  1608. #include <string.h>
/* 1 KB scratch buffer shared by the UID/security self-test below. */
static unsigned int g_tmp_buf[256];
  1610. void spinor_test_uid_securty(const struct device *dev)
  1611. {
  1612. unsigned int start_ms,end_ms, i, k;
  1613. unsigned int *pb = g_tmp_buf;
  1614. spi_flash_uid_read(dev, pb, 16);
  1615. printk("uid=0x%x, 0x%x, 0x%x, 0x%x\n", pb[0], pb[1], pb[2], pb[3]);
  1616. for(i = 1; i < 4; i++){// test security1-security3
  1617. start_ms = k_cycle_get_32();
  1618. spi_flash_security_erase(dev, i);
  1619. end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
  1620. printk("scurity erase %d use=%d ms\n", i, end_ms);
  1621. spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  1622. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {// check erase ok
  1623. if(pb[k] != 0xffffffff){
  1624. printk("erase check fail %d : off=0x%x, 0x%x!=0xffffffff\n", i, k*4, pb[k]);
  1625. break;
  1626. }
  1627. }
  1628. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {
  1629. pb[k] = k + 0x12345600*i;
  1630. }
  1631. start_ms = k_cycle_get_32();
  1632. spi_flash_security_write(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  1633. end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
  1634. printk("scurity write 1KB %d use=%d ms\n", i, end_ms);
  1635. }
  1636. for(i = 1; i < 4; i++){
  1637. memset(pb, 0, NOR_SE_PAGE_SIZE);
  1638. spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
  1639. for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++){
  1640. if(pb[k] != k + 0x12345600*i){
  1641. printk("scurity read cmp fail:%d,off=0x%x,0x%x!=0x%x\n",i, k*4, pb[k], k + 0x12345600*i);
  1642. break;
  1643. }
  1644. }
  1645. }
  1646. printk("secutrity test finished\n");
  1647. }
  1648. #endif