/* spi_flash_leopard.c — Actions SoC SPI NOR flash driver */
  1. /*
  2. * Copyright (c) 2018 Actions Semiconductor Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <drivers/flash.h>
  7. #include <drivers/spi.h>
  8. #include <logging/log.h>
  9. #include <soc.h>
  10. #include <board_cfg.h>
  11. #include "spi_flash.h"
  12. #include <linker/linker-defs.h>
  13. #include <drivers/gpio.h>
/* spinor parameters */
#define SPINOR_WRITE_PAGE_SIZE_BITS 8   /* 256-byte program page */
#define SPINOR_ERASE_SECTOR_SIZE_BITS 12 /* 4KB erase sector */
#define SPINOR_ERASE_BLOCK_SIZE_BITS 16  /* 64KB erase block */
#define SPINOR_WRITE_PAGE_SIZE (1 << SPINOR_WRITE_PAGE_SIZE_BITS)
#define SPINOR_ERASE_SECTOR_SIZE (1 << SPINOR_ERASE_SECTOR_SIZE_BITS)
#define SPINOR_ERASE_BLOCK_SIZE (1 << SPINOR_ERASE_BLOCK_SIZE_BITS)
#define SPINOR_WRITE_PAGE_MASK (SPINOR_WRITE_PAGE_SIZE - 1)
#define SPINOR_ERASE_SECTOR_MASK (SPINOR_ERASE_SECTOR_SIZE - 1)
#define SPINOR_ERASE_BLOCK_MASK (SPINOR_ERASE_BLOCK_SIZE - 1)
/* NOR command opcodes */
#define SPINOR_CMD_PROGRAM_ERASE_RESUME 0x7a /* nor resume */
#define SPINOR_CMD_PROGRAM_ERASE_SUSPEND 0x75 /* nor suspend */
#define SPINOR_CMD_READ_STATUS 0x05 /* read status1 */
#define SPIMEM_CMD_ENABLE_WRITE 0x06 /* enable write */
#define SPIMEM_TFLAG_WRITE_DATA 0x08
#define SPINOR_CMD_WR_VOL_CFG 0x81 /* Write Volatile Configuration Register */
#define SPINOR_CMD_RD_VOL_CFG 0x85 /* Read Volatile Configuration Register */
LOG_MODULE_REGISTER(spi_flash_acts, CONFIG_FLASH_LOG_LEVEL);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
/* GPIO controller device used to drive the second chip-select pin. */
static const struct device *spi_gpio_cs_dev;
/* Capacity of each NOR chip; used to split the flat address space: offsets
 * below nor_cs0_size go to CS0, the remainder to CS1. */
unsigned int nor_cs0_size, nor_cs1_size;
/* Per-chip SPI delay-chain calibration values. */
unsigned char nor_cs0_delaytran, nor_cs1_delaytran;
  36. __ramfunc static void spi_flash_acts_cs_gpio(struct spi_info *si, int value)
  37. {
  38. if (spi_gpio_cs_dev) {
  39. if (value) {
  40. sys_write32(GPIO_BIT(CONFIG_SPI_FLASH_1_GPIO_CS_PIN), GPION_BSR(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
  41. } else {
  42. sys_write32(GPIO_BIT(CONFIG_SPI_FLASH_1_GPIO_CS_PIN), GPION_BRR(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
  43. }
  44. }
  45. }
  46. __ramfunc void spi_flash1_cs_select(struct spinor_info *sni, int select)
  47. {
  48. if(select){
  49. if(nor_cs1_size > 0x1000000)
  50. sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  51. else
  52. sni->flag &= ~SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  53. sni->spi.set_cs = spi_flash_acts_cs_gpio;
  54. sni->spi.delay_chain = nor_cs1_delaytran;
  55. }else{
  56. if(nor_cs0_size > 0x1000000)
  57. sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  58. else
  59. sni->flag &= ~SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
  60. sni->spi.set_cs = NULL;
  61. sni->spi.delay_chain = nor_cs0_delaytran;
  62. }
  63. }
  64. /*for delaytran scan */
  65. unsigned char spi_flash_set_delaytran(const struct device *dev, off_t offset, unsigned char delaytran)
  66. {
  67. unsigned char old;
  68. struct spinor_info *sni = DEV_DATA(dev);
  69. if(offset < nor_cs0_size){
  70. old = nor_cs0_delaytran;
  71. nor_cs0_delaytran = delaytran;
  72. sni->spi.delay_chain = nor_cs0_delaytran;
  73. }else{
  74. old = nor_cs1_delaytran;
  75. nor_cs1_delaytran = delaytran;
  76. }
  77. return old;
  78. }
  79. #else
  80. /*for delaytran scan */
  81. unsigned char spi_flash_set_delaytran(const struct device *dev, off_t offset, unsigned char delaytran)
  82. {
  83. unsigned char old;
  84. struct spinor_info *sni = DEV_DATA(dev);
  85. old = sni->spi.delay_chain;
  86. sni->spi.delay_chain = delaytran;
  87. return old;
  88. }
  89. #endif
/* Force the NOR out of continuous/burst read mode via the ROM driver. */
__ramfunc void spi_flash_exit_continuous(struct spi_info *si)
{
	p_spinor_api->continuous_read_reset((struct spinor_info *)si);
}
/* Pre-transfer hook: wait for the SPI controller to go idle, then take the
 * chip out of continuous-read mode unless the flags say it is unnecessary.
 * In the 2-CS build the GPIO set_cs hook is temporarily removed so this
 * sequence targets CS0 only. */
__ramfunc void spi_flash_acts_prepare(struct spi_info *si)
{
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	void * bak_cs;
	bak_cs = si->set_cs;
	si->set_cs = NULL;
#endif
	/* wait for spi ready */
	while(!(sys_read32(SPI_STA(si->base)) & SPI_STA_READY)){
	}
	if(!(si->flag & SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ)) {
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		/* NOTE(review): exit-continuous is skipped while SPI_FLAG_QPI_MODE
		 * is set — the flag's polarity is non-obvious, see the QPI helpers */
		if(!(si->flag & SPI_FLAG_QPI_MODE))
#endif
			spi_flash_exit_continuous(si);
	}
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	if(bak_cs != NULL)
		si->set_cs = bak_cs;
#endif
}
  115. #ifdef CONFIG_SPI0_NOR_DTR_MODE
  116. __ramfunc static void spi0_dtr_set_clk(uint32_t rate_hz)
  117. {
  118. uint32_t core_pll, div, real_rate, val;
  119. core_pll = MHZ(((sys_read32(COREPLL_CTL)&0x3F)*8));
  120. div = (core_pll+rate_hz-1)/rate_hz;
  121. real_rate = core_pll/div;
  122. val = (div-1)|(1<<8) | (1<<12);
  123. sys_write32(val, CMU_SPI0CLK);
  124. }
/* Switch SPI0 into DTR (double transfer rate) mode: set the DTR control bit,
 * raise the dummy-cycle count, and double the SPI clock.  Returns the 24-bit
 * chip id; the id read also forces the NOR out of continuous-read mode. */
__ramfunc static unsigned int spi0_cache_enter_dtr_mode(struct spinor_info *sni, uint32_t clk_mhz)
{
	struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
	spi->ctrl |= 1<<19;       /* controller DTR enable */
	spi->delaychain |= 1<< 8; // 10 dummy
	//sys_set_bit(CMU_SPI0CLK, 12); // CLKD DDR MODE
	spi0_dtr_set_clk(MHZ(clk_mhz) * 2);
	return p_spinor_api->read_chipid(sni)& 0xffffff;// read chipid to exit continuous mode
}
  134. #if 0
  135. __ramfunc static unsigned int spi0_cache_exit_dtr_mode(struct spinor_info *sni)
  136. {
  137. //struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
  138. //spi->ctrl &= ~(1<<19);
  139. //sys_clear_bit(CMU_SPI0CLK, 12);
  140. return 0;
  141. }
  142. #endif
  143. #endif
  144. #ifdef CONFIG_SPI0_NOR_QPI_MODE
  145. #define SPIMEM_TFLAG_MIO_CMD_ADDR_DATA 0x04
/* Send the QPI-enable opcode to the NOR (4-bit bus only).  Afterwards
 * SPI_FLAG_QPI_MODE is cleared — NOTE(review): the flag appears to select
 * the command framing used by p_spinor_api->transfer rather than to mirror
 * the chip's QPI state; confirm against the ROM driver before changing. */
__ramfunc static void xspi_nor_enable_qpi(struct spinor_info *sni)
{
	if(sni->spi.bus_width != 4)
		return;
	p_spinor_api->transfer(sni, XSPI_NOR_CMD_QPI_ENABLE, 0, 0, NULL, 0, 0, 0);
	sni->spi.flag &= ~SPI_FLAG_QPI_MODE;
}
/* Send the QPI-disable opcode (must use multi-IO framing since the chip is
 * in QPI mode), then set SPI_FLAG_QPI_MODE — see the polarity note on
 * xspi_nor_enable_qpi. */
__ramfunc static void xspi_nor_disable_qpi(struct spinor_info *sni)
{
	if(sni->spi.bus_width != 4)
		return;
	p_spinor_api->transfer(sni, XSPI_NOR_CMD_QPI_DISABLE, 0, 0, NULL, 0, 0, SPIMEM_TFLAG_MIO_CMD_ADDR_DATA);
	sni->spi.flag |= SPI_FLAG_QPI_MODE;
}
/* One-time QPI bring-up: put the chip into QPI mode, program 6 dummy clocks
 * via the QPI SETPARA command (sent with multi-IO framing), then enable the
 * controller's hardware QPI (or DTR-QPI) path. */
__ramfunc static void xspi_nor_qpi_init(struct spinor_info *sni)
{
	struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
	if(sni->spi.bus_width == 4){
		xspi_nor_enable_qpi(sni);
		sni->spi.flag |= SPI_FLAG_QPI_MODE;
		p_spinor_api->transfer(sni, XSPI_NOR_CMD_SETPARA_QPI, 2<<4, 1, NULL, 0, 0, SPIMEM_TFLAG_MIO_CMD_ADDR_DATA); // set 6 dummy clk
		sni->spi.flag &= ~SPI_FLAG_QPI_MODE;
#ifdef CONFIG_SPI0_NOR_DTR_MODE
		spi->ctrl |= 1<<18; // spi 0 enable dtr qpi
#else
		spi->ctrl |= 1<<17; // spi 0 enable qpi
#endif
	}
}
  175. #endif
  176. #ifdef CONFIG_SPI_XIP_READ
  177. static void spi_flash_xip_init(void)
  178. {
  179. int err = soc_memctrl_mapping(CONFIG_SPI_XIP_VADDR, 0 , 0);
  180. if (err) {
  181. LOG_ERR(" flash xip map fail %d\n", err);
  182. }else{
  183. LOG_INF("flash xip map ok v=0x%x\n", CONFIG_SPI_XIP_VADDR);
  184. }
  185. }
  186. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
/* XIP + 2-CS read path.  Offsets below nor_cs0_size are served straight from
 * the memory-mapped XIP window; offsets above it address the CS1 chip via
 * the ROM driver in <=32KB chunks with CS1 selected and IRQs locked.
 * NOTE(review): a request that starts inside the CS0 window but extends past
 * nor_cs0_size is copied entirely from the XIP mapping — confirm callers
 * never cross the boundary. */
static int spi_flash_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	unsigned int xip_start;
	struct spinor_info *sni = DEV_DATA(dev);
	int ret = 0;
	size_t tmplen;
	uint32_t key;
	if(offset >= nor_cs0_size){
		offset -= nor_cs0_size;   /* rebase into the CS1 chip */
		tmplen = len;
		while(tmplen > 0) {
			/* 'len' is reused as the per-chunk size, max 32KB */
			if(tmplen < 0x8000)
				len = tmplen;
			else
				len = 0x8000;
			key = irq_lock();
			spi_flash1_cs_select(sni, 1);
			ret = p_spinor_api->read(sni, offset, data, len);
			spi_flash1_cs_select(sni, 0);
			irq_unlock(key);
			offset += len;
			data = (void *)((unsigned int )data + len);
			tmplen -= len;
		}
	}else{
		xip_start = CONFIG_SPI_XIP_VADDR + offset;
		pbrom_libc_api->p_memcpy(data, (void *)xip_start, len);
	}
	return ret;
}
  217. #else
  218. #define XIP_NOR_MAX_LEN 0x2000000 // xip only support 32MB
  219. static int spi_flash_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
  220. {
  221. unsigned int xip_start, key;
  222. size_t tlen;
  223. int ret;
  224. struct spinor_info *sni = DEV_DATA(dev);
  225. if(offset < XIP_NOR_MAX_LEN){
  226. xip_start = CONFIG_SPI_XIP_VADDR + offset;
  227. if(offset+len > XIP_NOR_MAX_LEN){
  228. tlen = XIP_NOR_MAX_LEN - offset;
  229. pbrom_libc_api->p_memcpy(data, (void *)xip_start, tlen);
  230. offset = XIP_NOR_MAX_LEN;
  231. len -= tlen;
  232. data =(void *)((unsigned int)data + tlen);
  233. }else{
  234. pbrom_libc_api->p_memcpy(data, (void *)xip_start, len);
  235. return 0;
  236. }
  237. }
  238. while(len) {
  239. if(len < 0x8000)
  240. tlen = len;
  241. else
  242. tlen = 0x8000;
  243. key = irq_lock();
  244. ret = p_spinor_api->read(sni, offset, data, tlen);
  245. irq_unlock(key);
  246. offset += tlen;
  247. data = (void *)((unsigned int )data + tlen);
  248. len -= tlen;
  249. }
  250. return 0;
  251. }
  252. #endif
  253. #else
/* Non-XIP read path: transfer in <=32KB chunks, each with IRQs locked.
 * QPI builds drop to SPI framing around the ROM read; the 2-CS build routes
 * offsets beyond nor_cs0_size to the second chip via CS1.
 * Returns the last ROM-driver status. */
__ramfunc int spi_flash_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	struct spinor_info *sni = DEV_DATA(dev);
	int ret = 0;
	size_t tmplen;
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	int cs_sel;
	if(offset < nor_cs0_size){
		cs_sel = 0;
	}else{
		cs_sel = 1;
		offset -= nor_cs0_size;	/* rebase into the CS1 chip */
	}
#endif
	tmplen = len;
	while(tmplen > 0) {
		/* 'len' is reused as the per-chunk size, max 32KB */
		if(tmplen < 0x8000)
			len = tmplen;
		else
			len = 0x8000;
		uint32_t key = irq_lock();
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			spi_flash1_cs_select(sni, 1);
		}
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_disable_qpi(sni);
#endif
		ret = p_spinor_api->read(sni, offset, data, len);
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_enable_qpi(sni);
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel){
			spi_flash1_cs_select(sni, 0);
		}
#endif
		irq_unlock(key);
		offset += len;
		data = (void *)((unsigned int )data + len);
		tmplen -= len;
	}
	return ret;
}
  299. #endif
  300. #ifdef CONFIG_NOR_SUSPEND_RESUME
/*
 * XT25F64F: suspend & resume can flip bits at address 0x0.  To work around
 * this IC bug, read the erase address once before issuing the resume
 * command, and the resume command must NOT be preceded by the
 * continuous-read exit sequence (two 0xFF bytes).
 */
  305. #define XT25F64F_CHIPID 0x17400b
/* Suspend an in-flight program/erase and poll WIP until the NOR becomes
 * readable again.  The suspend opcode is retried up to 3 times; each attempt
 * allows ~530us (30us tSUS plus 100 polls of 5us). */
__ramfunc static void spinor_suspend(struct spinor_info *sni)
{
	int i, j;
	// program/erase suspend
	for(j = 0; j < 3; j++){
		p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_SUSPEND);
		soc_udelay(30);
		for(i = 0; i < 100; i++) { //max 500us, tSUS must 30us
			soc_udelay(5);
			if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)){
				break;
			}
		}
		if(i != 100){	/* WIP cleared: suspend took effect */
			break;
		}
	}
}
/* Resume a suspended program/erase and check whether it has completed.
 * Returns true when the NOR is idle (operation finished), false when it is
 * still busy — in that case the operation is re-suspended so normal reads
 * can continue.  Runs entirely with IRQs locked.  On the XT25F64F the erase
 * address is read first and the continuous-read exit sequence is suppressed
 * around the resume opcode (chip errata, see comment above). */
__ramfunc static bool spinor_resume_and_check_idle(struct spinor_info *sni, unsigned int addr)
{
	bool ret;
	uint32_t key, i;
	char tmp[8];
	key = irq_lock();
	// program/erase resume
	if(sni->chipid == XT25F64F_CHIPID){ /*fix XT25F64F nor bug*/
		p_spinor_api->read(sni, addr, tmp, 4);	/* dummy read of the erase address (errata) */
		sni->spi.flag |= SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
		p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
		sni->spi.flag &= ~SPI_FLAG_NO_NEED_EXIT_CONTINUOUS_READ;
	}else{
		p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
	}
	soc_udelay(30);
	for(i = 0; i < 100; i++){ // wait to exit suspend (status2 bit7 = SUS)
		soc_udelay(5);
		if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2) & 0x80)){
			break;
		}
	}
	if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)) {
		ret = true;	/* WIP clear: operation finished */
	}else {
		for(i = 0; i < 20; i++){ // allow up to 1000 us more
			soc_udelay(50);
			if (0 == (p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1)){
				break;
			}
		}
		if(i != 20){
			ret = true;
		}else{
			/* still busy: suspend again so callers can keep reading */
			spinor_suspend(sni);
			ret = false;
		}
	}
	irq_unlock(key);
	return ret;
}
/* Resume/suspend ping-pong until the erase at @addr completes.  Each round
 * gives the NOR a window to make progress; outside ISR context every other
 * round sleeps 1ms.  Bounded at 20000 rounds (roughly 10s) before falling
 * back to a blocking busy-wait with IRQs locked. */
__ramfunc static void spinor_wait_finished(struct spinor_info *sni, unsigned int addr)
{
	int i;
	uint32_t key;
	for(i = 0; i < 20000; i++){ // ~10s worst-case timeout
		if (spinor_resume_and_check_idle(sni, addr))
			break;
		if(!k_is_in_isr()){
			if((i & 0x1) == 0)
				k_msleep(1);	/* yield every other round */
		}
	}
	if(i == 20000){
		LOG_INF("nor resume error\n");
		/* timed out: resume was already sent, just busy-wait for WIP */
		key = irq_lock();
		while(p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1); // wait nor ready
		irq_unlock(key);
	}
}
  384. void spinor_resume_finished(struct spinor_info *sni);
  385. static void spi_flash_suspend_finished(struct spinor_info *sni)
  386. {
  387. if(!k_is_in_isr())
  388. return;
  389. if(sni->flag & SPINOR_FLAG_NO_WAIT_READY){
  390. spinor_resume_finished(sni);
  391. sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
  392. }
  393. }
  394. K_MUTEX_DEFINE(spinor_w_mutex);
  395. static void spi_flash_w_lock(void)
  396. {
  397. if(!k_is_in_isr()){
  398. k_mutex_lock(&spinor_w_mutex, K_FOREVER);
  399. }
  400. }
  401. static void spi_flash_w_unlock(void)
  402. {
  403. if(!k_is_in_isr()){
  404. k_mutex_unlock(&spinor_w_mutex);
  405. }
  406. }
  407. #else // CONFIG_NOR_SUSPEND_RESUME must NOT define
  408. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  409. __ramfunc static int spinor_2cs_wait_ready(struct spinor_info *sni)
  410. {
  411. unsigned char status;
  412. uint32_t key;
  413. while (1) {
  414. key = irq_lock();
  415. spi_flash1_cs_select(sni, 1);
  416. status = p_spinor_api->read_status(sni, SPINOR_CMD_READ_STATUS);
  417. spi_flash1_cs_select(sni, 0);
  418. irq_unlock(key);
  419. if (!(status & 0x1))
  420. break;
  421. if(!k_is_in_isr()){
  422. k_msleep(2);
  423. }else{
  424. soc_udelay(2000);
  425. }
  426. }
  427. return 0;
  428. }
/* ISR guard for CS0 operations: if an erase on the second chip was left in
 * flight (SPINOR_FLAG_NO_WAIT_READY set), busy-wait 100ms so it can finish
 * before CS0 traffic starts.  Only acts in interrupt context. */
__ramfunc void spinor_cs0_check_irq(struct spinor_info *sni)
{
	if (k_is_in_isr()) {
		if (sni->flag & SPINOR_FLAG_NO_WAIT_READY) {
			soc_udelay(100000);//delay 100ms for nor2 erase timer
			sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
		}
	}
}
  438. #endif //end #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  439. #endif // endif #ifdef CONFIG_NOR_SUSPEND_RESUME
/* Blocking resume: send the resume opcode and spin until WIP clears.
 * Used on panic/ISR paths where a suspended erase must be completed. */
__ramfunc void spinor_resume_finished(struct spinor_info *sni)
{
	LOG_INF("nor is suspend, wait resume finished\n");
	p_spinor_api->write_cmd(sni, SPINOR_CMD_PROGRAM_ERASE_RESUME);
	soc_udelay(5);
	while(p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS) & 0x1); // wait nor ready
}
  447. #ifdef CONFIG_ACTIONS_PRINTK_DMA
  448. extern int check_panic_exe(void);
  449. #endif
/* Program/erase guard: flash writes in IRQ context are forbidden unless we
 * are executing the panic-dump path.  Returns 1 (after k_panic) when the
 * operation must be rejected, 0 when it may proceed. */
static int spi_flash_not_wr(void)
{
#ifdef CONFIG_ACTIONS_PRINTK_DMA
	if (k_is_in_isr() && !check_panic_exe()) {
		printk("flash not allow write in irq\n");
		k_panic();
		return 1;
	}
#endif
	return 0;
}
/* Program @len bytes at @offset.
 * - Transfers are split into 256-byte page chunks, each written with IRQs
 *   locked (QPI is dropped around the ROM write in QPI builds).
 * - 2-CS build: offsets >= nor_cs0_size address the CS1 chip.
 * - Rejected in IRQ context unless on the panic path (spi_flash_not_wr).
 * Returns the last ROM-driver status, or -1 when rejected. */
__ramfunc int spi_flash_acts_write(const struct device *dev, off_t offset, const void *data, size_t len)
{
	struct spinor_info *sni = DEV_DATA(dev);
	int ret = 0;
	int wlen;
	uint32_t key;
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	int cs_sel;
	if(offset < nor_cs0_size){
		cs_sel = 0;
		spinor_cs0_check_irq(sni);
	}else{
		cs_sel = 1;
		offset -= nor_cs0_size;	/* rebase into the CS1 chip */
		spinor_2cs_wait_ready(sni);
	}
#endif
	if (spi_flash_not_wr())
		return -1;
#ifdef CONFIG_NOR_SUSPEND_RESUME
	spi_flash_w_lock();
	spi_flash_suspend_finished(sni);
#endif
	while(len > 0) {
		/* at most one 256-byte page per locked section */
		if(len > SPINOR_WRITE_PAGE_SIZE)
			wlen = SPINOR_WRITE_PAGE_SIZE;
		else
			wlen = len;
		key = irq_lock();
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			spi_flash1_cs_select(sni, 1);
		}
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_disable_qpi(sni);
#endif
#ifdef CONFIG_SPI0_NOR_DTR_MODE
		//if(sni->flag & SPI_FLAG_WR_4IO)
		//	spi0_cache_exit_dtr_mode(sni);
#endif
		ret = p_spinor_api->write(sni, offset, data, wlen);
#ifdef CONFIG_SPI0_NOR_DTR_MODE
		//if(sni->flag & SPI_FLAG_WR_4IO)
		//spi0_cache_enter_dtr_mode(sni);
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_enable_qpi(sni);
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			spi_flash1_cs_select(sni, 0);
		}
#endif
		irq_unlock(key);
		offset += wlen;
		data = (void *)((unsigned int )data + wlen);
		len -= wlen;
	}
	/* only the CS0 chip is XIP-mapped, so only its writes invalidate cache */
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	if(!cs_sel) {
#endif
#ifdef CONFIG_SPI_XIP_READ
		soc_memctrl_cache_invalid();
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	}
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
	spi_flash_w_unlock();
#endif
	return ret ;
}
/* Erase @size bytes starting at @offset.
 * Uses a 64KB block erase when the remaining range is block-aligned and
 * large enough, otherwise 4KB sector erase.  Each erase command runs with
 * IRQs locked.  With CONFIG_NOR_SUSPEND_RESUME the erase is suspended right
 * after issue and completed via resume polling so reads stay responsive
 * (disabled for ranges >= 256KB and in panic/ISR context).  2-CS build:
 * offsets past nor_cs0_size go to the CS1 chip, erased block-wise and waited
 * on outside the IRQ lock.  Returns the last ROM-driver status, or -1 when
 * rejected in IRQ context. */
__ramfunc int spi_flash_acts_erase(const struct device *dev, off_t offset, size_t size)
{
	struct spinor_info *sni = DEV_DATA(dev);
	int ret = 0;
	uint32_t key;
	size_t erase_size = SPINOR_ERASE_SECTOR_SIZE;
	uint32_t t0,t1, t2;
	int use_block = 0;
	if (spi_flash_not_wr())
		return -1;
#ifndef CONFIG_SPINOR_TEST_DELAYCHAIN
	LOG_INF("nor_e:offset=0x%x,len=0x%x\n", (uint32_t)offset, size);
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
	bool b_suspend = true;
	use_block = 1;
	if((size >= SPINOR_ERASE_BLOCK_SIZE*4) || (k_is_in_isr())) // erase 256kb or panic, not suspend &resume
		b_suspend = false;
	spi_flash_w_lock();
	spi_flash_suspend_finished(sni);
	if(b_suspend)
		sni->flag |= SPINOR_FLAG_NO_WAIT_READY;
#else
	if(size >= SPINOR_ERASE_BLOCK_SIZE*4) // erase 256kb, use block
		use_block = 1;
#endif
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	int cs_sel;
	if(offset < nor_cs0_size){
		cs_sel = 0;
		spinor_cs0_check_irq(sni);
	}else{
		cs_sel = 1;
		offset -= nor_cs0_size;	/* rebase into the CS1 chip */
		spinor_2cs_wait_ready(sni);
		use_block = 1;
	}
#endif
	while (size > 0) {
		/* pick block erase only when aligned and enough range remains */
		if(use_block) {
			if (size < SPINOR_ERASE_BLOCK_SIZE) {
				erase_size = SPINOR_ERASE_SECTOR_SIZE;
			} else if (offset & SPINOR_ERASE_BLOCK_MASK) {
				erase_size = SPINOR_ERASE_SECTOR_SIZE;
			} else {
				erase_size = SPINOR_ERASE_BLOCK_SIZE;
			}
		}
		key= irq_lock();
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			sni->flag |= SPINOR_FLAG_NO_WAIT_READY;
			spi_flash1_cs_select(sni, 1);
		}
#endif
		t0 = k_cycle_get_32();
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_disable_qpi(sni);
#endif
		ret = p_spinor_api->erase(sni, offset, erase_size);
#ifdef CONFIG_SPI0_NOR_QPI_MODE
		xspi_nor_enable_qpi(sni);
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
		if(b_suspend)
			spinor_suspend(sni);
#endif
#ifdef CONFIG_SPINOR_TEST_DELAYCHAIN
		soc_udelay(100000); // try fail, nor status may not finished, delay erase finished
#endif
		t1 = k_cycle_get_32();
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			spi_flash1_cs_select(sni, 0);
		}
#endif
		irq_unlock(key);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
		if(cs_sel) {
			/* CS1 erase completion is waited on outside the IRQ lock */
			spinor_2cs_wait_ready(sni);
			sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
		}
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
		if(b_suspend)
			spinor_wait_finished(sni, offset);
#endif
		t2 = k_cycle_get_32();
#ifndef CONFIG_SPINOR_TEST_DELAYCHAIN
		LOG_INF("nor_e:off=0x%x,len=0x%x, tran=%d us, wait=%d\n", (uint32_t)offset, erase_size,
			k_cyc_to_us_ceil32(t1-t0), k_cyc_to_us_ceil32(t2-t1));
#endif
		size -= erase_size;
		offset += erase_size;
	}
#ifdef CONFIG_SPI_XIP_READ
	soc_memctrl_cache_invalid();
#endif
#ifdef CONFIG_NOR_SUSPEND_RESUME
	sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
	spi_flash_w_unlock();
#endif
	return ret ;
}
  638. static __ramfunc void xspi_delay(void)
  639. {
  640. volatile int i = 100000;
  641. while (i--)
  642. ;
  643. }
/* Ensure the Quad Enable bit is set so HOLD#/WP# behave as data lines.
 * Macronix parts keep QE at bit6 of status-1; most others (WinBond style)
 * keep it at bit1 of status-2.  If the status-2 write does not stick, fall
 * back to the legacy two-byte status-1 write (GigaDevice/Berg parts). */
__ramfunc void xspi_nor_enable_status_qe(struct spinor_info *sni)
{
	uint16_t status;
	/* MACRONIX's spinor has different QE bit */
	if (XSPI_NOR_MANU_ID_MACRONIX == (sni->chipid & 0xff)) {
		status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
		if (!(status & 0x40)) {
			/* set QE bit to disable HOLD/WP pin function */
			status |= 0x40;
			p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
						(u8_t *)&status, 1);
		}
		return;
	}
	/* check QE bit */
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	if (!(status & 0x2)) {
		/* set QE bit to disable HOLD/WP pin function, for WinBond */
		status |= 0x2;
		p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
					(u8_t *)&status, 1);
		/* check QE bit again */
		status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
		if (!(status & 0x2)) {
			/* oh, let's try old write status cmd, for GigaDevice/Berg:
			 * status-1 in the low byte, status-2 (with QE) in the high
			 * byte, written together with the legacy 0x01 command */
			status = ((status | 0x2) << 8) |
				p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
			p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS,
						(u8_t *)&status, 2);
		}
	}
	xspi_delay();
}
/* Program the controller IO-mode field (ctrl bits 11:10) from the bus
 * width: 1 -> 1 (single), 2 -> 2 (dual), 4 -> 3 (quad). */
static __ramfunc void xspi_setup_bus_width(struct spinor_info *sni, u8_t bus_width)
{
	struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
	spi->ctrl = (spi->ctrl & ~(0x3 << 10)) | (((bus_width & 0x7) / 2 + 1) << 10);
	xspi_delay();
}
/* Load a new delay-chain value into delaychain bits 5:0.  __sleepfunc:
 * also runs on the suspend/resume path. */
static __sleepfunc void xspi_setup_delaychain(struct spinor_info *sni, u8_t delay)
{
	struct acts_spi_reg *spi= (struct acts_spi_reg *)sni->spi.base;
	spi->delaychain = (spi->delaychain & ~(0x3F << 0)) | (delay << 0);
	xspi_delay();
}
  689. #if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
  690. extern int nor_test_delaychain(const struct device *dev);
  691. #endif
  692. #if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
  693. extern void nor_dual_quad_read_mode_try(struct spinor_info *sni);
  694. #endif
  695. #include "flash_delaytran_table.c"
  696. //static const struct nor_delaychain_tbl *g_p_chipid_tbl;
  697. static const struct id_nor_delaychain_tbl *g_chipid_tbl;
  698. static void nor0_delaytran_init(uint32_t chip_id)
  699. {
  700. uint8_t i;
  701. g_chipid_tbl = &chipid_dl_tbl[0];
  702. for (i = 1; i < ARRAY_SIZE(chipid_dl_tbl); i++) {
  703. if (chipid_dl_tbl[i].chip_id == chip_id) {
  704. g_chipid_tbl = &chipid_dl_tbl[i];
  705. printk("nor find dl tbl=%d\n", i);
  706. break;
  707. }
  708. }
  709. }
  710. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  711. static const struct nor_delaychain_tbl *g_cs1_p_chipid_tbl = NULL;
  712. static void nor1_delaytran_init(uint32_t chip_id)
  713. {
  714. uint8_t i;
  715. g_cs1_p_chipid_tbl = chipid_dl_tbl[0].tbl;
  716. for (i = 1; i < ARRAY_SIZE(chipid_dl_tbl); i++) {
  717. if (chipid_dl_tbl[i].chip_id == chip_id) {
  718. g_cs1_p_chipid_tbl = chipid_dl_tbl[i].tbl;
  719. printk("nor cs1 find dl tbl=%d\n", i);
  720. break;
  721. }
  722. }
  723. }
  724. #endif
/* Apply the delay-chain entry matching @vdd from the table selected at
 * init; no-op when @vdd has no entry.  2-CS build: cache the CS0 value and
 * take the CS1 value from its own table (or mirror CS0 when none matched). */
static __ramfunc void nor_set_delaychain_by_vdd(struct spinor_info *sni, uint16_t vdd)
{
	uint8_t i;
	const struct nor_delaychain_tbl *ptbl;
	ptbl = g_chipid_tbl->tbl;
	for (i = 0; i < CHIP_ID_TBL_NUM; i++) {
		if (ptbl[i].vdd_volt == vdd) {
			xspi_setup_delaychain(sni, ptbl[i].delay);
			sni->spi.delay_chain = ptbl[i].delay; //same as xip
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
			nor_cs0_delaytran = sni->spi.delay_chain;
			if(g_cs1_p_chipid_tbl == NULL){
				nor_cs1_delaytran = nor_cs0_delaytran;
			}else{
				nor_cs1_delaytran = g_cs1_p_chipid_tbl[i].delay;
			}
#endif
			break;
		}
	}
}
  746. #ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
/* Pick the SPI0 clock for a core voltage: a conservative rate at low vdd,
 * the selected table's max_clk otherwise.  NOTE(review): the DTR branch
 * compares with <= 1000 while the non-DTR branch uses < 1000 — confirm the
 * intended threshold. */
static __ramfunc void spi0_set_clk_by_vdd(uint16_t vdd_volt)
{
#ifdef CONFIG_SPI0_NOR_DTR_MODE
	if (vdd_volt <= 1000) {
		spi0_dtr_set_clk(MHZ(48) * 2);
	} else {
		spi0_dtr_set_clk(MHZ(g_chipid_tbl->max_clk) * 2);
	}
#else
	if (vdd_volt < 1000) {
		clk_set_rate(CLOCK_ID_SPI0, MHZ(64));
	} else {
		clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
	}
#endif
}
/* DVFS transition hook: keep the NOR clock and delay-chain valid across
 * core-voltage changes.  On a voltage drop, the clock is lowered and the
 * delay-chain retuned BEFORE the change; on a rise, both are updated AFTER
 * the new voltage is stable.  No-op when the voltage is unchanged. */
__dvfs_notifier_func static void nor_dvfs_notify(void *user_data, struct dvfs_freqs *dvfs_freq)
{
	struct spinor_info *sni = (struct spinor_info *)user_data;
	struct dvfs_level *old_dvfs_level, *new_dvfs_level;
	uint32_t key;
	if (!dvfs_freq) {
		printk("dvfs notify invalid param");
		return ;
	}
	if (dvfs_freq->old_level == dvfs_freq->new_level)
		return ;
	old_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->old_level);
	new_dvfs_level = dvfs_get_info_by_level_id(dvfs_freq->new_level);
	if (old_dvfs_level->vdd_volt == new_dvfs_level->vdd_volt) {
		return;
	}
	key = irq_lock();
	if (old_dvfs_level->vdd_volt > new_dvfs_level->vdd_volt) {
		/* vdd voltage decrease: slow down before the drop */
		if (dvfs_freq->state == DVFS_EVENT_PRE_CHANGE) {
			spi0_set_clk_by_vdd(new_dvfs_level->vdd_volt);
			nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
			printk("nor delaychain update by vdd:%d => %d\n",
				old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
		}
	} else {
		/* vdd voltage increase: speed up after the rise */
		if (dvfs_freq->state == DVFS_EVENT_POST_CHANGE) {
			nor_set_delaychain_by_vdd(sni, new_dvfs_level->vdd_volt);
			spi0_set_clk_by_vdd(new_dvfs_level->vdd_volt);
			printk("nor delaychain update by vdd:%d => %d\n",
				old_dvfs_level->vdd_volt, new_dvfs_level->vdd_volt);
		}
	}
	irq_unlock(key);
}
/* Forward declaration: the XIP nor driver data is defined later in this file. */
static struct spinor_info spi_flash_acts_data;
/* DVFS notifier registration data; user_data feeds nor_dvfs_notify(). */
static struct dvfs_notifier __dvfs_notifier_data nor_dvsf_notifier = {
	.dvfs_notify_func_t = nor_dvfs_notify,
	.user_data = &spi_flash_acts_data,
};
  804. #endif /* CONFIG_ACTS_DVFS_DYNAMIC_LEVEL */
  805. void spinor_test_uid_securty(const struct device *dev);
  806. __ramfunc static int spinor_wait_ready(struct spinor_info *sni)
  807. {
  808. unsigned char status;
  809. while (1) {
  810. status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
  811. if (!(status & 0x1))
  812. break;
  813. }
  814. return 0;
  815. }
  816. __ramfunc static int spi_flash_cfg_read(struct spinor_info *sni, uint32_t addr, uint8_t *cfg)
  817. {
  818. u8_t addr_len;
  819. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  820. if (cid > 24){
  821. addr_len = 4;
  822. }else{
  823. addr_len = 3;
  824. }
  825. return p_spinor_api->transfer(sni, SPINOR_CMD_RD_VOL_CFG, addr, addr_len, cfg, 1, 1, 0);
  826. }
  827. __ramfunc static int spi_flash_cfg_write(struct spinor_info *sni, uint32_t addr, uint8_t *cfg)
  828. {
  829. int ret;
  830. u8_t addr_len;
  831. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  832. if (cid > 24){
  833. addr_len = 4;
  834. }else{
  835. addr_len = 3;
  836. }
  837. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  838. ret = p_spinor_api->transfer(sni, SPINOR_CMD_WR_VOL_CFG, addr, addr_len, cfg, 1, 0, SPIMEM_TFLAG_WRITE_DATA);
  839. spinor_wait_ready(sni);
  840. return ret;
  841. }
  842. #define GD25B512M_CHIP_ID 0x1a47c8
  843. __ramfunc void spi_flash_init_by_chpid(struct spinor_info *sni)
  844. {
  845. uint8_t cfg = 0;
  846. if(sni->chipid == GD25B512M_CHIP_ID){
  847. spi_flash_cfg_read(sni, 6, &cfg);
  848. if(cfg &0x01){
  849. cfg &= 0xfe;
  850. spi_flash_cfg_write(sni, 6, &cfg); // xip enableed
  851. printk("eable xip\n");
  852. }
  853. }
  854. }
/*
 * One-time bring-up of the system XIP nor on SPI0, run with IRQs locked:
 * probe the chip id at a low clock, select the delay-chain table, recover a
 * suspended erase/program if needed, switch to 4-byte addressing for >16MB
 * parts, configure the bus width (1/2/4 lines), then raise the clock (or
 * enter DTR/QPI mode) and re-read the chip id to verify the delay chain.
 * With GPIO 2CS enabled, a second nor behind a GPIO chip-select is also
 * probed and its delay table initialized.
 * Returns 0 on success, -1 if the CS1 GPIO device cannot be bound.
 */
__ramfunc int spi_flash_acts_init(const struct device *dev)
{
	struct spinor_info *sni = DEV_DATA(dev);
	uint32_t key;
	uint8_t status, status2, status3;
	unsigned char cid;

	sni->spi.prepare_hook = spi_flash_acts_prepare;
	key = irq_lock();
#ifndef CONFIG_SPI0_NOR_DTR_MODE //dtr mode not set clk
	clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ/2)); // set low clk for read chipid
#endif
	sni->chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
	cid = (sni->chipid & 0xff0000)>>16;
	printk("read spi nor chipid:0x%x, cid=%d\n", sni->chipid, cid);
	nor0_delaytran_init(sni->chipid);
	sni->spi.freq_khz = KHZ(g_chipid_tbl->max_clk);
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
	status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
	printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
	/* finish a suspended erase/program left over from a reset */
	if(status2 & (NOR_STATUS2_SUS1|NOR_STATUS2_SUS2))
		spinor_resume_finished(sni);
	if(cid > 24){ //capacity = 2^cid (byte)
		p_spinor_api->set_addr_mode(sni, 4); // set 4byte mode
		sys_set_bit(sni->spi.base , 16); //spi clt bit16 4byte mode
	}
	spi_flash_init_by_chpid(sni);
	/* configure delay chain */
	nor_set_delaychain_by_vdd(sni, 1200);
#if IS_ENABLED(CONFIG_NOR_ACTS_DQ_MODE_ENABLE)
	nor_dual_quad_read_mode_try(sni);
	printk("bus width : %d, and cache read use ", sni->spi.bus_width);
#else
	if(sni->spi.bus_width == 4) {
		printk("nor is 4 line mode\n");
#ifndef CONFIG_SPI_4X_READ
		sni->spi.flag |= SPI_FLAG_SPI_4XIO;
#endif
		xspi_nor_enable_status_qe(sni);
		/* enable 4x mode */
		xspi_setup_bus_width(sni, 4);
	} else if(sni->spi.bus_width == 2) {
		printk("nor is 2 line mode\n");
		/* enable 2x mode */
		xspi_setup_bus_width(sni, 2);
	} else {
		sni->spi.bus_width = 1;
		printk("nor is 1 line mode\n");
		/* enable 1x mode */
		xspi_setup_bus_width(sni, 1);
	}
#endif
#ifdef CONFIG_SPI0_NOR_DTR_MODE
	///*dtr mode set 4line to set mode must xip run code*/
	if(sni->spi.bus_width == 4){
		//while(loop);
		sni->chipid = spi0_cache_enter_dtr_mode(sni, sni->spi.freq_khz/1000);
		printk("spinor dtr mode , chipid:0x%x, clkreg=0x%x\n", sni->chipid, sys_read32(CMU_SPI0CLK));
		//while(loop);
	}
#else
	/* setup SPI clock rate */
	clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
#endif
	/* check delay chain workable */
	sni->chipid = p_spinor_api->read_chipid(sni) & 0xffffff;
	printk("read again spi nor chipid:0x%x\n", sni->chipid);
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	unsigned int chip_id;
	spi_gpio_cs_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_1_GPIO_CS_PIN));
	if (!spi_gpio_cs_dev) {
		printk("failed to get gpio:%d device", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
		irq_unlock(key);
		return -1;
	}
	clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ/2)); // set low clk for read cs1 delaytran
	nor_cs0_size = 1 << ((sni->chipid >> 16)&0xff);
	gpio_pin_configure(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, GPIO_OUTPUT_HIGH);
	gpio_pin_set(spi_gpio_cs_dev, CONFIG_SPI_FLASH_1_GPIO_CS_PIN % 32, 1);
	printk("use GPIO:%d as spi cs pin\n", CONFIG_SPI_FLASH_1_GPIO_CS_PIN);
	spi_flash1_cs_select(sni, 1);
	p_spinor_api->write_cmd(sni, 0xAB); //exit deep power down
	soc_udelay(100);
	p_spinor_api->write_cmd(sni, 0xff);// reset nor
	chip_id = sni->chipid; // back nor0 chipid
	sni->chipid = p_spinor_api->read_chipid(sni);
	nor_cs1_size = 1 << ((sni->chipid >> 16)&0xff);
	printk("cs0 nor size=0x%x, cs1 nor chipid:0x%x, size=0x%x\n", nor_cs0_size, sni->chipid, nor_cs1_size);
	if(nor_cs1_size > 0x1000000)
		p_spinor_api->set_addr_mode(sni, 4); // set 4byte mode
	xspi_nor_enable_status_qe(sni);
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
	status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
	printk("cs1 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
	spi_flash1_cs_select(sni, 0);
	nor1_delaytran_init(sni->chipid);// init cs1 delaytran table
	nor_set_delaychain_by_vdd(sni, 1200); //init default delaytran by vdd
	sni->chipid = chip_id;
	//clk_set_rate(CLOCK_ID_SPI0, MHZ(CONFIG_SPI_FLASH_FREQ_MHZ));
	clk_set_rate(CLOCK_ID_SPI0, MHZ(g_chipid_tbl->max_clk));
#endif //#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
	status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
	printk("spinor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
#ifdef CONFIG_SPI0_NOR_DTR_MODE
	//volatile unsigned loop = 1;
	if(sni->spi.bus_width == 4){
		//while(loop);
		sni->chipid = spi0_cache_enter_dtr_mode(sni, g_chipid_tbl->max_clk);
		printk("spinor dtr mode , chipid:0x%x\n", sni->chipid);
		//while(loop);
	}
#endif
#ifdef CONFIG_SPI0_NOR_QPI_MODE
	printk("qpi enable\n");
	xspi_nor_qpi_init(sni);
	printk("qpi enable ok\n");
#endif
	//printk("nor power down\n");
	//p_spinor_api->transfer(sni, 0xB9, 0, 0, NULL, 0, 0, 0); // power down
	//printk("nor power down end\n");
#if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
	nor_test_delaychain(dev);
#endif
#ifdef CONFIG_ACTS_DVFS_DYNAMIC_LEVEL
	dvfs_register_notifier(&nor_dvsf_notifier);
#endif
#ifdef CONFIG_SPI_XIP_READ
	spi_flash_xip_init();
#endif
#ifdef CONFIG_NOR_SECURIYT_SUSPPORT
	spinor_test_uid_securty(dev);
#endif
	irq_unlock(key);
	flash_write_protection_set(dev, true);
	return 0;
}
  994. #if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
  995. static void spi_flash_acts_pages_layout(
  996. const struct device *dev,
  997. const struct flash_pages_layout **layout,
  998. size_t *layout_size)
  999. {
  1000. *layout = &(DEV_CFG(dev)->pages_layout);
  1001. *layout_size = 1;
  1002. }
  1003. #endif /* IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT) */
#if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
extern int nor_write_protection(const struct device *dev, bool enable);
#endif
/*
 * flash API write-protection hook. Delegates to nor_write_protection() when
 * data protection is compiled in, otherwise a no-op. Always reports success;
 * the helper's return value is intentionally ignored.
 */
__ramfunc int spi_flash_acts_write_protection(const struct device *dev, bool enable)
{
#if IS_ENABLED(CONFIG_NOR_ACTS_DATA_PROTECTION_ENABLE)
	nor_write_protection(dev, enable);
#endif
	return 0;
}
/* Reported flash geometry: 4KB write-block granularity, erased state 0xff. */
static const struct flash_parameters flash_acts_parameters = {
	.write_block_size = 0x1000,
	.erase_value = 0xff,
};
  1018. static const struct flash_parameters *
  1019. spi_flash_get_parameters(const struct device *dev)
  1020. {
  1021. ARG_UNUSED(dev);
  1022. return &flash_acts_parameters;
  1023. }
#ifdef CONFIG_PM_DEVICE
/*
 * PM hook for the XIP nor.
 * LATE_RESUME / EARLY_SUSPEND toggle SPICACHE_CTL bits [6:5] between 01 and
 * 10 (meaning not shown here — presumably cache mode around sleep; confirm
 * against the SoC manual). With GPIO 2CS enabled, RESUME/SUSPEND also wake
 * (0xAB) or deep-power-down (0xB9) the second nor behind CS1.
 * Always returns 0.
 */
int spi_flash_pm_control(const struct device *device, enum pm_device_action action)
{
	if(action == PM_DEVICE_ACTION_LATE_RESUME){
		sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x1 << 5) , SPICACHE_CTL);
		//printk("late reusme = 0x%x\n", sys_read32(SPICACHE_CTL));
	}else if(action == PM_DEVICE_ACTION_EARLY_SUSPEND){
		sys_write32((sys_read32(SPICACHE_CTL) & ~(0x3 << 5)) | (0x2 << 5) , SPICACHE_CTL);
		//printk("nor early suspend = 0x%x\n", sys_read32(SPICACHE_CTL));
	}
#if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
	struct spinor_info *sni = DEV_DATA(device);
	if(action == PM_DEVICE_ACTION_RESUME){
		printk("spi0 cs2 resume ...\n");
		spi_flash1_cs_select(sni, 1);
		p_spinor_api->write_cmd(sni, 0xAB); //exit deep power down
		spi_flash1_cs_select(sni, 0);
	}else if(action == PM_DEVICE_ACTION_SUSPEND){
		printk("spi0 cs2 suspend ...\n");
		spi_flash1_cs_select(sni, 1);
		p_spinor_api->write_cmd(sni, 0xB9); // enter deep power down
		spi_flash1_cs_select(sni, 0);
	}
#endif
	return 0;
}
#else
#define spi_flash_pm_control NULL
#endif
  1053. #if defined(CONFIG_SPI_FLASH_GPIO_2CS) && (CONFIG_SPI_FLASH_GPIO_2CS == 1)
  1054. K_MUTEX_DEFINE(spinor_cs2_w_mutex);
  1055. static void spi_flash_cs2_lock(void)
  1056. {
  1057. if(!k_is_in_isr()){
  1058. k_mutex_lock(&spinor_cs2_w_mutex, K_FOREVER);
  1059. }
  1060. }
  1061. static void spi_flash_cs2_unlock(void)
  1062. {
  1063. if(!k_is_in_isr()){
  1064. k_mutex_unlock(&spinor_cs2_w_mutex);
  1065. }
  1066. }
  1067. int spi_flash_acts_2cs_read(const struct device *dev, off_t offset, void *data, size_t len)
  1068. {
  1069. size_t tlen;
  1070. int ret;
  1071. if(offset < nor_cs0_size){
  1072. if(offset+len > nor_cs0_size){
  1073. tlen = nor_cs0_size - offset;
  1074. spi_flash_acts_read(dev, offset, data, tlen);
  1075. offset = nor_cs0_size;
  1076. data = (void *)((unsigned int )data + tlen);
  1077. len -= tlen;
  1078. }
  1079. }
  1080. if(offset < nor_cs0_size){// read cs0 not lock
  1081. ret = spi_flash_acts_read(dev, offset, data, len);
  1082. }else{
  1083. spi_flash_cs2_lock();
  1084. ret = spi_flash_acts_read(dev, offset, data, len);
  1085. spi_flash_cs2_unlock();
  1086. }
  1087. return ret;
  1088. }
  1089. int spi_flash_acts_2cs_write(const struct device *dev, off_t offset, const void *data, size_t len)
  1090. {
  1091. size_t tlen;
  1092. int ret;
  1093. if (spi_flash_not_wr())
  1094. return -1;
  1095. spi_flash_cs2_lock();
  1096. if(offset < nor_cs0_size){
  1097. if(offset+len > nor_cs0_size){
  1098. tlen = nor_cs0_size - offset;
  1099. spi_flash_acts_write(dev, offset, data, tlen);
  1100. offset = nor_cs0_size;
  1101. data = (const void *)((unsigned int )data + tlen);
  1102. len -= tlen;
  1103. }
  1104. }
  1105. ret = spi_flash_acts_write(dev, offset, data, len);
  1106. spi_flash_cs2_unlock();
  1107. return ret;
  1108. }
  1109. int spi_flash_acts_2cs_erase(const struct device *dev, off_t offset, size_t size)
  1110. {
  1111. size_t tlen;
  1112. int ret;
  1113. if (spi_flash_not_wr())
  1114. return -1;
  1115. spi_flash_cs2_lock();
  1116. if(offset < nor_cs0_size){
  1117. if(offset+size > nor_cs0_size){
  1118. tlen = nor_cs0_size - offset;
  1119. spi_flash_acts_erase(dev, offset, tlen);
  1120. offset = nor_cs0_size;
  1121. size -= tlen;
  1122. }
  1123. }
  1124. ret = spi_flash_acts_erase(dev, offset, size);
  1125. spi_flash_cs2_unlock();
  1126. return ret;
  1127. }
/* flash API table: 2CS build routes I/O through the boundary-splitting wrappers. */
static struct flash_driver_api spi_flash_nor_api = {
	.read = spi_flash_acts_2cs_read,
	.write = spi_flash_acts_2cs_write,
	.erase = spi_flash_acts_2cs_erase,
	.write_protection = spi_flash_acts_write_protection,
	.get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = spi_flash_acts_pages_layout,
#endif
};
#else
/* flash API table: single-CS build uses the direct accessors. */
static struct flash_driver_api spi_flash_nor_api = {
	.read = spi_flash_acts_read,
	.write = spi_flash_acts_write,
	.erase = spi_flash_acts_erase,
	.write_protection = spi_flash_acts_write_protection,
	.get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = spi_flash_acts_pages_layout,
#endif
};
#endif
/* system XIP spinor */
static struct spinor_info spi_flash_acts_data = {
	.spi = {
		.base = SPI0_REG_BASE,
		.bus_width = CONFIG_SPI_FLASH_BUS_WIDTH,
		.delay_chain = CONFIG_SPI_FLASH_DELAY_CHAIN,
		/* reserve a physical DMA channel only when one is available */
#if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
		.dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
#endif
		.flag = 0,
	},
	.flag = 0,
};
/* Disabled debug helper for reconfiguring bus width/DMA at runtime. */
#if 0
void spinor_set(int bus_width, int use_dma, int nxio)
{
	spi_flash_acts_data.spi.bus_width = bus_width;
	if(use_dma)
		spi_flash_acts_data.spi.dma_base = (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100));
	else
		spi_flash_acts_data.spi.dma_base = 0;
	if(nxio){
		spi_flash_acts_data.spi.flag |= SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO;
	}else{
		spi_flash_acts_data.spi.flag &= ~(SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO);
	}
}
#endif
/* Static config: one flat region of 4KB pages covering the whole chip. */
static const struct spi_flash_acts_config spi_acts_config = {
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
	.pages_layout = {
		.pages_count = CONFIG_SPI_FLASH_CHIP_SIZE/0x1000,
		.pages_size = 0x1000,
	},
#endif
	.chip_size = CONFIG_SPI_FLASH_CHIP_SIZE,
	.page_size = 0x1000,
};
/* Register the XIP nor early (PRE_KERNEL_1): code runs from this device. */
#if IS_ENABLED(CONFIG_SPI_FLASH_0)
DEVICE_DEFINE(spi_flash_acts, CONFIG_SPI_FLASH_NAME, &spi_flash_acts_init, spi_flash_pm_control,
		&spi_flash_acts_data, &spi_acts_config, PRE_KERNEL_1,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &spi_flash_nor_api);
#endif
  1193. #if IS_ENABLED(CONFIG_SPI_FLASH_2)
  1194. static K_MUTEX_DEFINE(flash_2_mutex);
  1195. static int spi_flash_2_acts_read(const struct device *dev, off_t offset, void *data, size_t len)
  1196. {
  1197. struct spinor_info *sni = DEV_DATA(dev);
  1198. int ret = 0;
  1199. size_t tmplen;
  1200. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1201. tmplen = len;
  1202. while(tmplen > 0) {
  1203. if(tmplen < 0x8000)
  1204. len = tmplen;
  1205. else
  1206. len = 0x8000;
  1207. ret = p_spinor_api->read(sni, offset, data, len);
  1208. offset += len;
  1209. data = (void *)((unsigned int )data + len);
  1210. tmplen -= len;
  1211. }
  1212. k_mutex_unlock(&flash_2_mutex);
  1213. return ret;
  1214. }
  1215. static int spi_flash_2_acts_write(const struct device *dev, off_t offset, const void *data, size_t len)
  1216. {
  1217. struct spinor_info *sni = DEV_DATA(dev);
  1218. int ret;
  1219. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1220. ret = p_spinor_api->write(sni, offset, data, len);
  1221. k_mutex_unlock(&flash_2_mutex);
  1222. return ret ;
  1223. }
  1224. static int spi_flash_2_acts_erase(const struct device *dev, off_t offset, size_t size)
  1225. {
  1226. struct spinor_info *sni = DEV_DATA(dev);
  1227. int ret;
  1228. k_mutex_lock(&flash_2_mutex, K_FOREVER);
  1229. ret = p_spinor_api->erase(sni, offset, size);
  1230. k_mutex_unlock(&flash_2_mutex);
  1231. return ret ;
  1232. }
/*
 * Power the second nor on or off — either via a dedicated power GPIO, or by
 * sending deep-power-down (0xB9) / release (0xAB) commands.
 * Returns 0 on success, negative on GPIO binding/config failure.
 * NOTE(review): function name typo ("pwoer") kept — it is referenced by the
 * init and PM paths in this file.
 */
static int spi_flash_2_pwoer(struct spinor_info *sni, bool on)
{
#if IS_ENABLED(CONFIG_SPI_FLASH_2_USE_GPIO_POWER)
	int ret;
	int gpio_value = CONFIG_SPI_FLASH_2_GPIO_POWER_LEVEL;
	const struct device *power_gpio_dev;
	uint8_t power_gpio = CONFIG_SPI_FLASH_2_POWER_GPIO % 32;
	power_gpio_dev = device_get_binding(CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
	if (!power_gpio_dev) {
		LOG_ERR("Failed to bind nor power GPIO(%d:%s)", power_gpio, CONFIG_GPIO_PIN2NAME(CONFIG_SPI_FLASH_2_POWER_GPIO));
		return -1;
	}
	ret = gpio_pin_configure(power_gpio_dev, power_gpio, GPIO_OUTPUT);
	if (ret) {
		LOG_ERR("Failed to config output GPIO:%d", power_gpio);
		return ret;
	}
	if (on) {
		/* power on nor */
		gpio_pin_set(power_gpio_dev, power_gpio, gpio_value);
	} else {
		/* power off nor */
		gpio_pin_set(power_gpio_dev, power_gpio, !gpio_value);
	}
#else
	if (on) {
		p_spinor_api->write_cmd(sni, 0xAB); //exit deep power down
	} else {
		p_spinor_api->write_cmd(sni, 0xB9); // enter deep power down
	}
#endif
	return 0;
}
#ifdef CONFIG_PM_DEVICE
/*
 * PM hook for the SPI3 data nor: power it up on RESUME, down on SUSPEND.
 * Always returns 0; power-switch failures are only logged inside the helper.
 */
int spi_flash_2_pm_control(const struct device *device, enum pm_device_action action)
{
	struct spinor_info *sni = DEV_DATA(device);
	if(action == PM_DEVICE_ACTION_RESUME){
		LOG_INF("spi2 nor resume ...\n");
		spi_flash_2_pwoer(sni, true);
	}else if(action == PM_DEVICE_ACTION_SUSPEND){
		LOG_INF("spi2 nor suspend ...\n");
		spi_flash_2_pwoer(sni, false);
	}
	return 0;
}
#else
#define spi_flash_2_pm_control NULL
#endif
/*
 * Bring up the secondary data nor on SPI3: enable/reset the controller, set
 * its clock, power the chip, probe the chip id, switch >16MB parts to 4-byte
 * addressing, and for 4-line parts set the QE bit in status2 so the HOLD/WP
 * pins become data lines. The chip id is read again at the end to verify the
 * configured delay chain. Always returns 0.
 */
static int spi_flash_2_acts_init(const struct device *dev)
{
	struct spinor_info *sni = DEV_DATA(dev);
	uint8_t status, status2, status3;
	unsigned char cid;
	printk("spi3 flash init\n");

	/* enable spi3 controller clock */
	acts_clock_peripheral_enable(CLOCK_ID_SPI3);

	/* reset spi3 controller */
	acts_reset_peripheral(RESET_ID_SPI3);

	/* setup SPI3 clock rate */
	clk_set_rate(CLOCK_ID_SPI3, MHZ(CONFIG_SPI_FLASH_2_FREQ_MHZ));

	spi_flash_2_pwoer(sni, true);
	sni->chipid = p_spinor_api->read_chipid(sni);
	cid = (sni->chipid & 0xff0000)>>16;
	printk("read spi3 nor chipid:0x%x, cid=%d\n", sni->chipid, cid);
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
	status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
	printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
	if(cid > 24){ //capacity = 2^cid (byte)
		p_spinor_api->set_addr_mode(sni, 4); // set 4byte mode
	}
	if(sni->spi.bus_width == 4) {
		printk("data nor is 4 line mode\n");
		sni->spi.flag |= SPI_FLAG_SPI_4XIO;
		/* check QE bit */
		if (!(status2 & 0x2)) {
			/* set QE bit to disable HOLD/WP pin function, for WinBond */
			status2 |= 0x2;
			p_spinor_api->write_status(sni, XSPI_NOR_CMD_WRITE_STATUS2,
						(u8_t *)&status2, 1);
		}
	} else if(sni->spi.bus_width == 2) {
		printk("data nor is 2 line mode\n");
	} else {
		sni->spi.bus_width = 1;
		printk("data nor is 1 line mode\n");
	}

	/* check delay chain workable */
	sni->chipid = p_spinor_api->read_chipid(sni);
	printk("read again spi3 nor chipid:0x%x\n", sni->chipid);
	status = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS);
	status2 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS2);
	status3 = p_spinor_api->read_status(sni, XSPI_NOR_CMD_READ_STATUS3);
	printk("spi3 nor status: {0x%02x 0x%02x 0x%02x}\n", status, status2, status3);
#if IS_ENABLED(CONFIG_SPINOR_TEST_DELAYCHAIN)
	//nor_test_delaychain(dev);
#endif
	return 0;
}
/* flash API table for the SPI3 data nor (no write protection hook). */
static struct flash_driver_api spi_flash_2_nor_api = {
	.read = spi_flash_2_acts_read,
	.write = spi_flash_2_acts_write,
	.erase = spi_flash_2_acts_erase,
	.get_parameters = spi_flash_get_parameters,
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = spi_flash_acts_pages_layout,
#endif
};
/* Driver data for the SPI3 data nor; NO_IRQ_LOCK: transfers need not mask IRQs. */
static struct spinor_info spi_flash_2_acts_data = {
	.spi = {
		.base = SPI3_REG_BASE,
		.bus_width = CONFIG_SPI_FLASH_2_BUS_WIDTH,
		.delay_chain = CONFIG_SPI_FLASH_2_DELAY_CHAIN,
#if (CONFIG_DMA_SPINOR_RESEVER_CHAN < CONFIG_DMA_0_PCHAN_NUM)
		.dma_base= (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100)),
#endif
		.flag = SPI_FLAG_NO_IRQ_LOCK,
	},
	.flag = 0,
};
/* Disabled debug helpers for runtime bus-width/DMA/delay-chain tweaking. */
#if 0
void spinor3_set(int bus_width, int use_dma, int nxio)
{
	spi_flash_2_acts_data.spi.bus_width = bus_width;
	if(use_dma)
		spi_flash_2_acts_data.spi.dma_base = (DMA_REG_BASE + 0x100 + (CONFIG_DMA_SPINOR_RESEVER_CHAN * 0x100));
	else
		spi_flash_2_acts_data.spi.dma_base = 0;
	if(nxio){
		spi_flash_2_acts_data.spi.flag |= SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO;
	}else{
		spi_flash_2_acts_data.spi.flag &= ~(SPI_FLAG_SPI_NXIO | SPI_FLAG_WR_4IO);
	}
}
void spinor03_set_dt(int spi, int delaytran)
{
	if(spi == 0)
		spi_flash_acts_data.spi.delay_chain = delaytran;
	else
		spi_flash_2_acts_data.spi.delay_chain = delaytran;
}
#endif
/* Static config for the data nor: one flat region of 4KB pages. */
static const struct spi_flash_acts_config spi_flash_2_acts_config = {
#if IS_ENABLED(CONFIG_FLASH_PAGE_LAYOUT)
	.pages_layout = {
		.pages_count = CONFIG_SPI_FLASH_2_CHIP_SIZE/0x1000,
		.pages_size = 0x1000,
	},
#endif
	.chip_size = CONFIG_SPI_FLASH_2_CHIP_SIZE,
	.page_size = 0x1000,
};
/* Register the data nor after the kernel is up (POST_KERNEL). */
DEVICE_DEFINE(spi_flash_2_acts, CONFIG_SPI_FLASH_2_NAME, &spi_flash_2_acts_init, spi_flash_2_pm_control,
		&spi_flash_2_acts_data, &spi_flash_2_acts_config, POST_KERNEL,
		CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, &spi_flash_2_nor_api);
  1389. #endif
  1390. #ifdef CONFIG_NOR_SECURIYT_SUSPPORT
  1391. #define SPINOR_CMD_SECURITY_ERASE 0x44 /* erase security registers cmd*/
  1392. #define SPINOR_CMD_SECURITY_PROGRAM 0x42 /* program security registers cmd*/
  1393. #define SPINOR_CMD_SECURITY_READ 0x48 /* read security registers cmd*/
  1394. #define SPINOR_CMD_UID_READ 0x4B /* Read Unique ID cmd*/
  1395. __ramfunc static int spinor_erase_security(struct spinor_info *sni, unsigned int addr)
  1396. {
  1397. u32_t key;
  1398. u8_t addr_mode;
  1399. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1400. if (cid > 24)
  1401. addr_mode = 4;
  1402. else
  1403. addr_mode = 3;
  1404. key = irq_lock(); //ota diff upgrade, must be irq lock
  1405. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  1406. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_ERASE, addr, addr_mode, 0, 0, 0, SPIMEM_TFLAG_WRITE_DATA);
  1407. spinor_wait_ready(sni);
  1408. irq_unlock(key);
  1409. return 0;
  1410. }
  1411. __ramfunc static int spinor_write_security(struct spinor_info *sni, unsigned int addr, const void *data, int len)
  1412. {
  1413. u32_t key;
  1414. u8_t addr_mode;
  1415. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1416. if (cid > 24)
  1417. addr_mode = 4;
  1418. else
  1419. addr_mode = 3;
  1420. key = irq_lock();
  1421. p_spinor_api->write_cmd(sni, SPIMEM_CMD_ENABLE_WRITE);
  1422. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_PROGRAM, addr, addr_mode, (u8_t *)data, len, 0, SPIMEM_TFLAG_WRITE_DATA);
  1423. spinor_wait_ready(sni);
  1424. irq_unlock(key);
  1425. return 0;
  1426. }
  1427. __ramfunc static int spinor_read_security(struct spinor_info *sni, unsigned int addr, void *data, int len)
  1428. {
  1429. u32_t key;
  1430. u8_t addr_mode;
  1431. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1432. if (cid > 24)
  1433. addr_mode = 4;
  1434. else
  1435. addr_mode = 3;
  1436. key = irq_lock();
  1437. p_spinor_api->transfer(sni, SPINOR_CMD_SECURITY_READ, addr, addr_mode, (u8_t *)data, len, 1, 0);
  1438. irq_unlock(key);
  1439. return 0;
  1440. }
  1441. __ramfunc static int spinor_read_uid(struct spinor_info *sni, void *data, int len)
  1442. {
  1443. u32_t key;
  1444. u8_t addr_mode;
  1445. u8_t cid = (sni->chipid & 0xff0000) >> 16;
  1446. if (cid > 24)
  1447. addr_mode = 4;
  1448. else
  1449. addr_mode = 3;
  1450. key = irq_lock();
  1451. p_spinor_api->transfer(sni, SPINOR_CMD_UID_READ, 0, addr_mode, (u8_t *)data, len, 1, 0);
  1452. irq_unlock(key);
  1453. return 0;
  1454. }
  1455. #define NOR_SE_PAGE_SIZE 256
  1456. #define NOR_SE_PAGE_MASK (NOR_SE_PAGE_SIZE-1)
  1457. #define NOR_SE_MAX_SIZE_EACH_REGN 1024 /**/
  1458. /*security_regn 0-3*/
  1459. int spi_flash_security_erase(const struct device *dev, unsigned int security_regn)
  1460. {
  1461. struct spinor_info *sni = DEV_DATA(dev);
  1462. return spinor_erase_security(sni, security_regn<<12);
  1463. }
  1464. int spi_flash_security_write(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  1465. {
  1466. unsigned int wlen, unlen;
  1467. struct spinor_info *sni = DEV_DATA(dev);
  1468. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  1469. return -1;
  1470. unlen = offset & NOR_SE_PAGE_MASK;
  1471. while(len){
  1472. if(unlen){
  1473. wlen = NOR_SE_PAGE_SIZE - unlen;
  1474. if(wlen > len)
  1475. wlen = len;
  1476. unlen = 0;
  1477. }else{
  1478. if(len < NOR_SE_PAGE_SIZE)
  1479. wlen = len;
  1480. else
  1481. wlen = NOR_SE_PAGE_SIZE;
  1482. }
  1483. spinor_write_security(sni, (security_regn<<12)|offset, data, wlen);
  1484. data = (unsigned char *)data + wlen;
  1485. len -= wlen;
  1486. offset += wlen;
  1487. }
  1488. return 0;
  1489. }
  1490. int spi_flash_security_read(const struct device *dev, unsigned int security_regn, unsigned int offset, void *data, unsigned int len)
  1491. {
  1492. struct spinor_info *sni = DEV_DATA(dev);
  1493. if(offset + len >= NOR_SE_MAX_SIZE_EACH_REGN)
  1494. return -1;
  1495. return spinor_read_security(sni, (security_regn<<12)|offset, data, len);
  1496. }
  1497. int spi_flash_uid_read(const struct device *dev, void *uid, unsigned int len)
  1498. {
  1499. struct spinor_info *sni = DEV_DATA(dev);
  1500. return spinor_read_uid(sni, uid, len);
  1501. }
#include <string.h>
/* 1KB scratch buffer shared by the UID/security self-test below. */
static unsigned int g_tmp_buf[256];
/*
 * Self-test: dump the UID, then for security registers 1-3 erase, verify the
 * erased (0xffffffff) pattern, write a per-register counting pattern at
 * offset 200, and finally read everything back and compare.
 * NOTE(review): start_ms/end_ms hold cycle counts converted to ms, and only
 * NOR_SE_PAGE_SIZE (256) bytes are written despite the "1KB" log text.
 */
void spinor_test_uid_securty(const struct device *dev)
{
	unsigned int start_ms,end_ms, i, k;
	unsigned int *pb = g_tmp_buf;

	spi_flash_uid_read(dev, pb, 16);
	printk("uid=0x%x, 0x%x, 0x%x, 0x%x\n", pb[0], pb[1], pb[2], pb[3]);
	for(i = 1; i < 4; i++){// test security1-security3
		start_ms = k_cycle_get_32();
		spi_flash_security_erase(dev, i);
		end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
		printk("scurity erase %d use=%d ms\n", i, end_ms);
		spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
		for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {// check erase ok
			if(pb[k] != 0xffffffff){
				printk("erase check fail %d : off=0x%x, 0x%x!=0xffffffff\n", i, k*4, pb[k]);
				break;
			}
		}
		/* fill a distinct counting pattern for this register */
		for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++) {
			pb[k] = k + 0x12345600*i;
		}
		start_ms = k_cycle_get_32();
		spi_flash_security_write(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
		end_ms = k_cyc_to_ms_near32(k_cycle_get_32() -start_ms) ;
		printk("scurity write 1KB %d use=%d ms\n", i, end_ms);
	}
	/* second pass: read back and verify the patterns written above */
	for(i = 1; i < 4; i++){
		memset(pb, 0, NOR_SE_PAGE_SIZE);
		spi_flash_security_read(dev, i, 200, pb, NOR_SE_PAGE_SIZE);
		for(k = 0; k < NOR_SE_PAGE_SIZE/4; k++){
			if(pb[k] != k + 0x12345600*i){
				printk("scurity read cmp fail:%d,off=0x%x,0x%x!=0x%x\n",i, k*4, pb[k], k + 0x12345600*i);
				break;
			}
		}
	}
	printk("secutrity test finished\n");
}
  1542. #endif