/*
 * Copyright (c) 2017 Actions Semiconductor Co., Ltd
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief SPINOR Flash driver for LARK
 */

#include <irq.h>
#include "spi_internal.h"
#include "spimem.h"
#include "../spi_flash.h"
#include <board_cfg.h>

/* spinor parameters */
#define SPINOR_WRITE_PAGE_SIZE_BITS 8
#define SPINOR_ERASE_SECTOR_SIZE_BITS 12
#define SPINOR_ERASE_BLOCK_SIZE_BITS 16
#define SPINOR_WRITE_PAGE_SIZE (1 << SPINOR_WRITE_PAGE_SIZE_BITS)
#define SPINOR_ERASE_SECTOR_SIZE (1 << SPINOR_ERASE_SECTOR_SIZE_BITS)
#define SPINOR_ERASE_BLOCK_SIZE (1 << SPINOR_ERASE_BLOCK_SIZE_BITS)
#define SPINOR_WRITE_PAGE_MASK (SPINOR_WRITE_PAGE_SIZE - 1)
#define SPINOR_ERASE_SECTOR_MASK (SPINOR_ERASE_SECTOR_SIZE - 1)
#define SPINOR_ERASE_BLOCK_MASK (SPINOR_ERASE_BLOCK_SIZE - 1)
/* spinor commands */
#define SPINOR_CMD_WRITE_PAGE 0x02 /* write one page */
#define SPINOR_CMD_DISABLE_WRITE 0x04 /* disable write */
#define SPINOR_CMD_READ_STATUS 0x05 /* read status1 */
#define SPINOR_CMD_READ_STATUS2 0x35 /* read status2 */
#define SPINOR_CMD_READ_STATUS3 0x15 /* read status3 */
#define SPINOR_CMD_WRITE_STATUS 0x01 /* write status1 */
#define SPINOR_CMD_WRITE_STATUS2 0x31 /* write status2 */
#define SPINOR_CMD_WRITE_STATUS3 0x11 /* write status3 */
#define SPINOR_CMD_ENABLE_WRITE 0x06 /* enable write */
#define SPINOR_CMD_FAST_READ 0x0b /* fast read */
#define SPINOR_CMD_ERASE_SECTOR 0x20 /* 4KB erase */
#define SPINOR_CMD_ERASE_BLOCK_32K 0x52 /* 32KB erase */
#define SPINOR_CMD_ERASE_BLOCK 0xd8 /* 64KB erase */
#define SPINOR_CMD_READ_CHIPID 0x9f /* JEDEC ID */
#define SPINOR_CMD_DISABLE_QSPI 0xff /* disable QSPI */
#define SPINOR_CMD_PROGRAM_ERASE_RESUME 0x7a /* program/erase resume */
#define SPINOR_CMD_PROGRAM_ERASE_SUSPEND 0x75 /* program/erase suspend */
#define SPINOR_CMD_SECURITY_ERASE 0x44 /* erase security registers */
#define SPINOR_CMD_SECURITY_PROGRAM 0x42 /* program security registers */
#define SPINOR_CMD_SECURITY_READ 0x48 /* read security registers */
#define SPINOR_CMD_UID_READ 0x4B /* read unique ID */
#define SPINOR_CMD_EN4B 0xB7 /* enter 4-byte address mode */
#define SPINOR_CMD_EXIT4B 0xE9 /* exit 4-byte address mode */
#define SPINOR_CMD_WR_EXTADDR 0xC5 /* write extended address register */
#define SPINOR_CMD_WR_NONVOL_CFG 0xb1 /* write nonvolatile configuration register */
#define SPINOR_CMD_RD_NONVOL_CFG 0xb5 /* read nonvolatile configuration register */
#define SPINOR_CMD_WR_VOL_CFG 0x81 /* write volatile configuration register */
#define SPINOR_CMD_RD_VOL_CFG 0x85 /* read volatile configuration register */

/* spinor 4-byte address commands */
#define SPINOR_CMD_WRITE_PAGE_4B 0x12 /* write one page with 4-byte address */
#define SPINOR_CMD_ERASE_SECTOR_4B 0x21 /* 4KB erase with 4-byte address */
#define SPINOR_CMD_ERASE_BLOCK_4B 0xdc /* 64KB erase with 4-byte address */
#define SPINOR_CMD_WIRTE_EXT_ADDR_R 0xc5 /* write extended address register: cmd + 1 byte (ext addr) */
#define SPINOR_CMD_READ_EXT_ADDR_R 0xc8 /* read extended address register */
/* NOR Flash vendor IDs */
#define SPINOR_MANU_ID_ALLIANCE 0x52 /* Alliance Semiconductor */
#define SPINOR_MANU_ID_AMD 0x01 /* AMD */
#define SPINOR_MANU_ID_AMIC 0x37 /* AMIC */
#define SPINOR_MANU_ID_ATMEL 0x1f /* ATMEL */
#define SPINOR_MANU_ID_CATALYST 0x31 /* Catalyst */
#define SPINOR_MANU_ID_ESMT 0x8c /* ESMT */
#define SPINOR_MANU_ID_EON 0x1c /* EON */
#define SPINOR_MANU_ID_FD_MICRO 0xa1 /* Shanghai Fudan Microelectronics */
#define SPINOR_MANU_ID_FIDELIX 0xf8 /* FIDELIX */
#define SPINOR_MANU_ID_FMD 0x0e /* Fremont Micro Device (FMD) */
#define SPINOR_MANU_ID_FUJITSU 0x04 /* Fujitsu */
#define SPINOR_MANU_ID_GIGADEVICE 0xc8 /* GigaDevice */
#define SPINOR_MANU_ID_GIGADEVICE2 0x51 /* GigaDevice2 */
#define SPINOR_MANU_ID_HYUNDAI 0xad /* Hyundai */
#define SPINOR_MANU_ID_INTEL 0x89 /* Intel */
#define SPINOR_MANU_ID_MACRONIX 0xc2 /* Macronix (MX) */
#define SPINOR_MANU_ID_NANTRONIC 0xd5 /* Nantronics */
#define SPINOR_MANU_ID_NUMONYX 0x20 /* Numonyx, Micron, ST */
#define SPINOR_MANU_ID_PMC 0x9d /* PMC */
#define SPINOR_MANU_ID_SANYO 0x62 /* SANYO */
#define SPINOR_MANU_ID_SHARP 0xb0 /* SHARP */
#define SPINOR_MANU_ID_SPANSION 0x01 /* SPANSION */
#define SPINOR_MANU_ID_SST 0xbf /* SST */
#define SPINOR_MANU_ID_SYNCMOS_MVC 0x40 /* SyncMOS (SM) and Mosel Vitelic Corporation (MVC) */
#define SPINOR_MANU_ID_TI 0x97 /* Texas Instruments */
#define SPINOR_MANU_ID_WINBOND 0xda /* Winbond */
#define SPINOR_MANU_ID_WINBOND_NEX 0xef /* Winbond (ex Nexcom) */
#define SPINOR_MANU_ID_ZH_BERG 0xe0 /* ZhuHai Berg Microelectronics (Bo Guan) */

//#define SPINOR_FLAG_UNLOCK_IRQ_WAIT_READY (1 << 0)
#define NOR_DELAY_CHAIN 0x8

/* system XIP spinor */
static const struct spinor_info system_spi_nor = {
	.spi = {
		.base = SPI0_REG_BASE,
		.bus_width = 1,
		.delay_chain = NOR_DELAY_CHAIN,
		.flag = 0,
#if 0
		.dma_base = 0x4001C600, //DMA5
#endif
	}
};
_nor_fun static unsigned int spinor_read_status(struct spinor_info *sni, unsigned char cmd)
{
	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	return spimem_read_status(&sni->spi, cmd);
}
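
/*
 * Busy-wait until the WIP (write-in-progress) bit, bit 0 of status
 * register 1, is cleared by the flash. Note that no timeout is applied
 * here; callers that need bounded waits use the suspend/resume helpers.
 */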
_nor_fun static int spinor_wait_ready(struct spinor_info *sni)
{
	unsigned char status;

	while (1) {
		status = spinor_read_status(sni, SPINOR_CMD_READ_STATUS);
		if (!(status & 0x1))
			break;
	}

	return 0;
}
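
/*
 * Issue a write-type command (page program, erase, write status, ...):
 * disable write protection, send the command/address/data, then wait for
 * completion unless SPINOR_FLAG_NO_WAIT_READY is set. IRQs are locked
 * around the transfer unless SPI_FLAG_NO_IRQ_LOCK is set, and
 * SPINOR_FLAG_UNLOCK_IRQ_WAIT_READY lets the ready polling run with IRQs
 * re-enabled.
 */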
_nor_fun static void spinor_write_data(struct spinor_info *sni, unsigned char cmd,
		unsigned int addr, int addr_len, const unsigned char *buf, int len)
{
	struct spi_info *si = &sni->spi;
	unsigned int key = 0;

	if (!(si->flag & SPI_FLAG_NO_IRQ_LOCK)) {
		key = irq_lock();
	}

	spimem_set_write_protect(si, 0);
	spimem_transfer(si, cmd, addr, addr_len, (unsigned char *)buf, len,
			0, SPIMEM_TFLAG_WRITE_DATA);

	if (sni->flag & SPINOR_FLAG_NO_WAIT_READY) {
		if (!(si->flag & SPI_FLAG_NO_IRQ_LOCK))
			irq_unlock(key);
	} else {
		if (!(si->flag & SPI_FLAG_NO_IRQ_LOCK)) {
			if (sni->flag & SPINOR_FLAG_UNLOCK_IRQ_WAIT_READY) {
				irq_unlock(key);
				spinor_wait_ready(sni);
			} else {
				spinor_wait_ready(sni);
				irq_unlock(key);
			}
		} else {
			spinor_wait_ready(sni);
		}
	}
}
_nor_fun static void spinor_write_status(struct spinor_info *sni, unsigned char cmd,
		unsigned char *status, int len)
{
	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	spinor_write_data(sni, cmd, 0, 0, status, len);
	spinor_wait_ready(sni);
}

_nor_fun static int spinor_erase_internal(struct spinor_info *sni,
		unsigned char cmd, unsigned int addr)
{
	if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
		spinor_write_data(sni, cmd, addr, 4, 0, 0);
	} else {
		spinor_write_data(sni, cmd, addr, 3, 0, 0);
	}

	return 0;
}

_nor_fun static int spinor_write_internal(struct spinor_info *sni,
		unsigned int addr, const unsigned char *buf, int len)
{
	if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
		spinor_write_data(sni, SPINOR_CMD_WRITE_PAGE_4B, addr, 4, buf, len);
	} else {
		spinor_write_data(sni, SPINOR_CMD_WRITE_PAGE, addr, 3, buf, len);
	}

	return 0;
}

_nor_fun static int spinor_read_internal(struct spinor_info *sni,
		unsigned int addr, unsigned char *buf, int len)
{
	if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
		spimem_read_page(&sni->spi, addr, 4, buf, len);
	} else {
		spimem_read_page(&sni->spi, addr, 3, buf, len);
	}

	return 0;
}

_nor_fun int spinor_read(struct spinor_info *sni, unsigned int addr, void *data, int len)
{
	if (!len)
		return 0;

	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	spinor_read_internal(sni, addr, data, len);

	return 0;
}
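
/*
 * Program an arbitrary range by splitting it into page programs that
 * never cross a 256-byte page boundary: the unaligned head (if any)
 * first, then full pages, then the tail.
 *
 * Example: addr = 0x10F0, len = 0x220 is issued as four page programs:
 * 0x10 bytes at 0x10F0, 0x100 bytes at 0x1100, 0x100 bytes at 0x1200,
 * and 0x10 bytes at 0x1300.
 */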
_nor_fun int spinor_write(struct spinor_info *sni, unsigned int addr,
		const void *data, int len)
{
	int unaligned_len, remaining, write_size;

	if (!len)
		return 0;

	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	/* unaligned write? */
	if (addr & SPINOR_WRITE_PAGE_MASK)
		unaligned_len = SPINOR_WRITE_PAGE_SIZE - (addr & SPINOR_WRITE_PAGE_MASK);
	else
		unaligned_len = 0;

	remaining = len;
	while (remaining > 0) {
		if (unaligned_len) {
			/* write unaligned page data */
			if (unaligned_len > len)
				write_size = len;
			else
				write_size = unaligned_len;
			unaligned_len = 0;
		} else if (remaining < SPINOR_WRITE_PAGE_SIZE)
			write_size = remaining;
		else
			write_size = SPINOR_WRITE_PAGE_SIZE;

		spinor_write_internal(sni, addr, data, write_size);

		addr += write_size;
		data = (unsigned char *)data + write_size;
		remaining -= write_size;
	}

	return 0;
}
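
/*
 * Program data with the controller's randomizer enabled (used by the OTA
 * diff upgrade path). The whole operation runs with IRQs locked and the
 * original SSPI_CTL value is restored afterwards. If the range crosses
 * into a second 256-byte page, it is split at that page boundary, with
 * the randomizer paused after the first program and resumed for the
 * second (SPIMEM_TFLAG_PAUSE_RANDOMIZE / SPIMEM_TFLAG_RESUME_RANDOMIZE).
 */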
_nor_fun int spinor_write_with_randomizer(struct spinor_info *sni, unsigned int addr, const void *data, int len)
{
	struct spi_info *si;
	u32_t key, page_addr, origin_spi_ctl;
	int wlen;
	unsigned char addr_len;
	unsigned char cmd;

	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	si = &sni->spi;

	key = irq_lock(); /* OTA diff upgrade: IRQs must stay locked */
	origin_spi_ctl = spi_read(si, SSPI_CTL);

	spimem_set_write_protect(si, 0);

	page_addr = (addr + len) & ~SPINOR_WRITE_PAGE_MASK;
	if ((addr & ~SPINOR_WRITE_PAGE_MASK) != page_addr) {
		/* data crosses a write page boundary, split it */
		wlen = page_addr - addr;
		if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
			addr_len = 4;
			cmd = SPINOR_CMD_WRITE_PAGE_4B;
		} else {
			addr_len = 3;
			cmd = SPINOR_CMD_WRITE_PAGE;
		}
		spimem_transfer(si, cmd, addr, addr_len, (u8_t *)data, wlen,
				0, SPIMEM_TFLAG_WRITE_DATA | SPIMEM_TFLAG_ENABLE_RANDOMIZE |
				SPIMEM_TFLAG_PAUSE_RANDOMIZE);
		spinor_wait_ready(sni);

		data = (unsigned char *)data + wlen;
		len -= wlen;
		addr = page_addr;
		spimem_set_write_protect(si, 0);
	}

	if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
		addr_len = 4;
		cmd = SPINOR_CMD_WRITE_PAGE_4B;
	} else {
		addr_len = 3;
		cmd = SPINOR_CMD_WRITE_PAGE;
	}
	spimem_transfer(si, cmd, addr, addr_len, (u8_t *)data, len,
			0, SPIMEM_TFLAG_WRITE_DATA | SPIMEM_TFLAG_ENABLE_RANDOMIZE | SPIMEM_TFLAG_RESUME_RANDOMIZE);
	spinor_wait_ready(sni);

	spi_write(si, SSPI_CTL, origin_spi_ctl);
	spi_delay();
	irq_unlock(key);

	return 0;
}
#ifdef CONFIG_NOR_SECURIYT_SUSPPORT
_nor_fun int spinor_erase_security(struct spinor_info *sni, unsigned int addr)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock(); /* OTA diff upgrade: IRQs must stay locked */
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_set_write_protect(si, 0);
	spimem_transfer(si, SPINOR_CMD_SECURITY_ERASE, addr, 3, 0, 0, 0, SPIMEM_TFLAG_WRITE_DATA);
	spinor_wait_ready(sni);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	spi_delay();
	irq_unlock(key);

	return 0;
}

_nor_fun int spinor_write_security(struct spinor_info *sni, unsigned int addr, const void *data, int len)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock();
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_set_write_protect(si, 0);
	spimem_transfer(si, SPINOR_CMD_SECURITY_PROGRAM, addr, 3, (u8_t *)data, len, 0, SPIMEM_TFLAG_WRITE_DATA);
	spinor_wait_ready(sni);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	spi_delay();
	irq_unlock(key);

	return 0;
}

_nor_fun int spinor_read_security(struct spinor_info *sni, unsigned int addr, void *data, int len)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock();
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_transfer(si, SPINOR_CMD_SECURITY_READ, addr, 3, (u8_t *)data, len, 1, 0);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	irq_unlock(key);

	return 0;
}

_nor_fun int spinor_read_uid(struct spinor_info *sni, void *data, int len)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock();
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_transfer(si, SPINOR_CMD_UID_READ, 0, 3, (u8_t *)data, len, 1, 0);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	irq_unlock(key);

	return 0;
}
#endif
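
/*
 * Switch the flash (and this driver) to 4-byte address mode so devices
 * larger than 16 MB can be fully addressed. When the driver code runs
 * from RAM (CONFIG_NOR_CODE_IN_RAM), the mode is not entered here; the
 * RAM read/write/erase paths below switch temporarily as needed.
 */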
_nor_fun int spinor_enter_4byte_address_mode(struct spinor_info *sni)
{
#ifdef CONFIG_NOR_CODE_IN_RAM
	printk("spinor code in ram\n");
	return 0;
#else
	printk("spinor enter 4-byte address mode\n");
	sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
	return spimem_transfer(&sni->spi, SPINOR_CMD_EN4B, 0, 0, NULL, 0, 0, 0);
#endif
}
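
/*
 * Erase a sector-aligned range. Both addr and size must be multiples of
 * 4 KB, otherwise -1 is returned. For each step the largest suitable
 * erase is chosen: a 64 KB block erase when the address is block aligned
 * and at least 64 KB remain, else a 4 KB sector erase.
 *
 * Example: addr = 0x1F000, size = 0x21000 is erased as one 4 KB sector
 * at 0x1F000 followed by two 64 KB blocks at 0x20000 and 0x30000.
 */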
_nor_fun int spinor_erase(struct spinor_info *sni, unsigned int addr, int size)
{
	int remaining, erase_size;
	unsigned char cmd;

	if (!size)
		return 0;

	if (addr & SPINOR_ERASE_SECTOR_MASK || size & SPINOR_ERASE_SECTOR_MASK)
		return -1;

	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	/* erase sector by sector or block by block */
	remaining = size;
	while (remaining > 0) {
		if (addr & SPINOR_ERASE_BLOCK_MASK || remaining < SPINOR_ERASE_BLOCK_SIZE) {
			if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
				cmd = SPINOR_CMD_ERASE_SECTOR_4B;
			} else {
				cmd = SPINOR_CMD_ERASE_SECTOR;
			}
			erase_size = SPINOR_ERASE_SECTOR_SIZE;
		} else {
			if (sni->flag & SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN) {
				cmd = SPINOR_CMD_ERASE_BLOCK_4B;
			} else {
				cmd = SPINOR_CMD_ERASE_BLOCK;
			}
			erase_size = SPINOR_ERASE_BLOCK_SIZE;
		}

		spinor_erase_internal(sni, cmd, addr);

		addr += erase_size;
		remaining -= erase_size;
	}

	return 0;
}
_nor_fun unsigned int spinor_read_chipid(struct spinor_info *sni)
{
	unsigned int chipid;

	if (!sni)
		sni = (struct spinor_info *)&system_spi_nor;

	spimem_read_chipid(&sni->spi, &chipid, 3);

	return chipid;
}

_nor_fun static int spinor_write_vol_cfg(struct spinor_info *sni, unsigned int addr, const void *data, int len)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock();
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_set_write_protect(si, 0);
	spimem_transfer(si, SPINOR_CMD_WR_VOL_CFG, addr, 3, (u8_t *)data, len, 0, SPIMEM_TFLAG_WRITE_DATA);
	spinor_wait_ready(sni);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	spi_delay();
	irq_unlock(key);

	return 0;
}

_nor_fun static int spinor_read_vol_cfg(struct spinor_info *sni, unsigned int addr, void *data, int len)
{
	struct spi_info *si;
	u32_t key, origin_spi_ctl;

	si = &sni->spi;
	key = irq_lock();
	origin_spi_ctl = spi_read(si, SSPI_CTL);
	spimem_transfer(si, SPINOR_CMD_RD_VOL_CFG, addr, 3, (u8_t *)data, len, 1, 0);
	spi_write(si, SSPI_CTL, origin_spi_ctl);
	irq_unlock(key);

	return 0;
}

#define GD25B512ME_CHIPID 0x1a47c8
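
/*
 * Enable XIP mode on the GD25B512ME. Per the handling below, bit 0 of
 * volatile configuration register byte 6 disables XIP when set, so it is
 * cleared and the register is read back. Other chip IDs are left
 * untouched.
 */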
void spinor_enable_xip_mode(struct spinor_info *sni)
{
	uint8_t ncfg;

	if (sni->chipid == GD25B512ME_CHIPID) {
		spinor_read_vol_cfg(sni, 6, &ncfg, 1);
		//printk("b vol cfg=0x%x\n", ncfg);
		if (ncfg & 0x1) {
			ncfg &= 0xfe; /* clear bit 0 to enable XIP */
			spinor_write_vol_cfg(sni, 6, &ncfg, 1);
			ncfg = 0x0;
		}
		spinor_read_vol_cfg(sni, 6, &ncfg, 1);
		//printk("vol cfg=0x%x\n", ncfg);
	}
}
#ifdef CONFIG_NOR_CODE_IN_RAM
#include "soc.h"

#define XIP_CODE_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#define NOR_3B_ADDR_MAXLEN (1 << 24) /* 16 MB: limit of 3-byte addressing */

static unsigned int xip_nor_offset;

unsigned int spi_nor_get_xip_offset(void)
{
	return xip_nor_offset;
}

#define PY25Q256HB_CHIPID 0x192085

_nor_fun static void spinor_write_ext_addr_reg(struct spinor_info *sni, unsigned char ex_addr)
{
	spimem_set_write_protect(&sni->spi, 0);
	spimem_write_cmd_addr(&sni->spi, SPINOR_CMD_WR_EXTADDR, ex_addr, 1);
}

_nor_fun static void spinor_4byte_address_mode(struct spinor_info *sni, bool enter)
{
	if (enter) {
		sni->flag |= SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
		//spimem_write_cmd(&sni->spi, SPINOR_CMD_EN4B);
	} else {
		sni->flag &= ~SPINOR_FLAG_4BYTE_ADDRESS_MODE_EN;
		//spimem_write_cmd(&sni->spi, SPINOR_CMD_EXIT4B);
		if (sni->chipid == PY25Q256HB_CHIPID) /* work around a PY25Q256HB NOR IC bug: reset the extended address register */
			spinor_write_ext_addr_reg(sni, 0);
	}
}

#ifdef CONFIG_SPINOR_TEST_DELAYCHAIN
#undef CONFIG_NOR_SUSPEND_RESUME
#else
#define CONFIG_NOR_XIP_READ
#endif

#ifdef CONFIG_NOR_XIP_READ
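/*
 * Read through the memory-mapped XIP window instead of issuing SPI
 * commands: the NOR area starting at xip_nor_offset is mapped at
 * 0x10000000, so the data is simply copied with the ROM libc memcpy.
 */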
int spinor_xip_read(unsigned int addr, void *data, int len)
{
	unsigned int xip_start;

	xip_start = 0x10000000 + addr - xip_nor_offset;
	pbrom_libc_api->p_memcpy(data, (void *)xip_start, len);

	return 0;
}
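
/*
 * Read routine used when the driver code runs from RAM. The request is
 * split into up to three regions:
 *  - below xip_nor_offset: read with normal SPI commands (IRQs locked);
 *  - from xip_nor_offset up to 16 MB: copied via spinor_xip_read();
 *  - above 16 MB: read with the flash temporarily in 4-byte address mode.
 */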
_nor_fun int spinor_ram_read(struct spinor_info *sni, unsigned int addr, void *data, int len)
{
	int ret;
	unsigned int tlen;
	uint32_t key;

	if (addr < xip_nor_offset) {
		tlen = xip_nor_offset - addr;
		if (tlen > len)
			tlen = len;
		key = irq_lock();
		spinor_read(sni, addr, data, tlen);
		irq_unlock(key);
		data = (void *)((unsigned int)data + tlen);
		len -= tlen;
		addr += tlen;
	}

	if (len <= 0)
		return 0;

	if (addr + len > NOR_3B_ADDR_MAXLEN) {
		if (addr < NOR_3B_ADDR_MAXLEN) {
			tlen = NOR_3B_ADDR_MAXLEN - addr;
			spinor_xip_read(addr, data, tlen);
			data = (void *)((unsigned int)data + tlen);
			len -= tlen;
			addr = NOR_3B_ADDR_MAXLEN;
		}
		key = irq_lock();
		spinor_4byte_address_mode(sni, true);
		ret = spinor_read(sni, addr, data, len);
		spinor_4byte_address_mode(sni, false);
		irq_unlock(key);
	} else {
		ret = spinor_xip_read(addr, data, len);
	}

	return ret;
}

static int spinor_xip_init(const struct device *arg)
{
	xip_nor_offset = soc_boot_get_info()->nor_offset & (NOR_3B_ADDR_MAXLEN - 1);
	printk("xip_nor_offset=0x%x-0x%x\n", xip_nor_offset, XIP_CODE_ADDR);

	return 0;
}
SYS_INIT(spinor_xip_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#else
_nor_fun int spinor_ram_read(struct spinor_info *sni, unsigned int addr, void *data, int len)
{
	int ret;
	uint32_t key;

	key = irq_lock();
	if (addr + len > NOR_3B_ADDR_MAXLEN)
		spinor_4byte_address_mode(sni, true);
	ret = spinor_read(sni, addr, data, len);
	if (addr + len > NOR_3B_ADDR_MAXLEN)
		spinor_4byte_address_mode(sni, false);
	irq_unlock(key);

	return ret;
}
#endif

#ifdef CONFIG_NOR_SUSPEND_RESUME
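/*
 * Suspend an ongoing program/erase: issue the suspend command and poll
 * (up to ~500 us; tSUS requires at least 30 us) until the WIP bit
 * clears, re-issuing the suspend command up to three times if the device
 * stays busy.
 */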
_nor_fun static void spinor_suspend(struct spinor_info *sni)
{
	int i, j;

	/* program/erase suspend */
	for (j = 0; j < 3; j++) {
		spimem_write_cmd(&sni->spi, SPINOR_CMD_PROGRAM_ERASE_SUSPEND);
		for (i = 0; i < 100; i++) { /* max 500 us; tSUS requires at least 30 us */
			soc_udelay(5);
			if (0 == (spinor_read_status(sni, SPINOR_CMD_READ_STATUS) & 0x1)) {
				break;
			}
		}
		if (i != 100) {
			break;
		}
	}
}
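
/*
 * Resume a suspended program/erase and report whether the flash went
 * idle: wait for the suspend flag (status register 2, bit 7) to clear,
 * then give the operation up to ~500 us to finish. If it is still busy,
 * suspend it again and return false so the caller can retry later.
 */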
_nor_fun static bool spinor_resume_and_check_idle(struct spinor_info *sni)
{
	bool ret;
	uint32_t key, i;

	key = irq_lock();
	/* program/erase resume */
	spimem_write_cmd(&sni->spi, SPINOR_CMD_PROGRAM_ERASE_RESUME);
	for (i = 0; i < 100; i++) { /* wait to exit suspend */
		soc_udelay(5);
		if (0 == (spinor_read_status(sni, SPINOR_CMD_READ_STATUS2) & 0x80)) {
			break;
		}
	}

	if (0 == (spinor_read_status(sni, SPINOR_CMD_READ_STATUS) & 0x1)) {
		ret = true;
	} else {
		for (i = 0; i < 10; i++) { /* allow up to 500 us to finish */
			soc_udelay(50);
			if (0 == (spinor_read_status(sni, SPINOR_CMD_READ_STATUS) & 0x1)) {
				break;
			}
		}
		if (i != 10) {
			ret = true;
		} else {
			spinor_suspend(sni);
			ret = false;
		}
	}
	irq_unlock(key);

	return ret;
}

_nor_fun static void spinor_wait_finished(struct spinor_info *sni)
{
	int i;

	for (i = 0; i < 2000; i++) { /* 2000 * 500 us = 1000 ms timeout */
		if (spinor_resume_and_check_idle(sni))
			break;
		if (!k_is_in_isr()) {
			if ((i & 0x1) == 0)
				k_msleep(1);
		}
	}
	if (i == 2000) {
		printk("nor resume error\n");
	}
}

K_MUTEX_DEFINE(spinor_w_mutex);

_nor_fun void spinor_resume_finished(struct spinor_info *sni)
{
	printk("nor is suspend, wait resume finished\n");
	spimem_write_cmd(&sni->spi, SPINOR_CMD_PROGRAM_ERASE_RESUME);
	soc_udelay(5);
	spinor_wait_ready(sni);
}
#endif
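
/*
 * Write routine used when the driver code runs from RAM. Data is written
 * in chunks of at most one 256-byte page with IRQs locked, switching to
 * 4-byte address mode for addresses above 16 MB, and the memory
 * controller cache is invalidated afterwards so XIP reads see the new
 * data. With CONFIG_NOR_SUSPEND_RESUME, writes are serialized against
 * erases via spinor_w_mutex and wait for any suspended operation first.
 */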
_nor_fun int spinor_ram_write(struct spinor_info *sni, unsigned int addr,
		const void *data, int len)
{
	int ret = 0;
	uint32_t key;
	int wlen;

#ifdef CONFIG_NOR_SUSPEND_RESUME
	k_mutex_lock(&spinor_w_mutex, K_FOREVER);
	spinor_wait_finished(sni);
#endif
	while (len > 0) {
		if (len > SPINOR_WRITE_PAGE_SIZE)
			wlen = SPINOR_WRITE_PAGE_SIZE;
		else
			wlen = len;

		key = irq_lock();
		if (addr + wlen > NOR_3B_ADDR_MAXLEN)
			spinor_4byte_address_mode(sni, true);
		ret = spinor_write(sni, addr, data, wlen);
		if (addr + wlen > NOR_3B_ADDR_MAXLEN)
			spinor_4byte_address_mode(sni, false);
#ifdef CONFIG_SPINOR_TEST_DELAYCHAIN
		soc_udelay(50000); /* delay-chain trial may fail while the NOR is still busy; wait for the program to finish */
#endif
		irq_unlock(key);

		addr += wlen;
		data = (void *)((unsigned int)data + wlen);
		len -= wlen;
	}
	soc_memctrl_cache_invalid();
#ifdef CONFIG_NOR_SUSPEND_RESUME
	k_mutex_unlock(&spinor_w_mutex);
#endif
	return ret;
}

#ifdef CONFIG_NOR_SUSPEND_RESUME
//#define NOR_ERASE_CHECK
#endif
#ifdef NOR_ERASE_CHECK
_nor_fun int spinor_erase_chcek(struct spinor_info *sni, unsigned int addr, int len)
{
	unsigned int i, j;
	unsigned int *p;

	if (addr < xip_nor_offset || len <= 0 || (addr + len > NOR_3B_ADDR_MAXLEN)) {
		return 0;
	}
	p = (unsigned int *)(0x10000000 + addr - xip_nor_offset);
	for (i = 0; i < len / 4; i++) {
		if (p[i] != 0xffffffff) {
			printk("nor offset=0x%x, 0x%x!=0xffffffff\n", addr + i * 4, p[i]);
			printk("status=0x%x 0x%x\n", spinor_read_status(sni, SPINOR_CMD_READ_STATUS),
				spinor_read_status(sni, SPINOR_CMD_READ_STATUS2));
			for (j = 0; j < 16; j++)
				printk("%d 0x%x=0x%x\n", j, addr + (i + j - 8) * 4, p[i + j - 8]);
			k_panic();
		}
	}
	printk("erase check ok, off=0x%x,len=0x%x\n", addr, len);

	return 0;
}
#endif
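
/*
 * Erase routine used when the driver code runs from RAM. The range is
 * erased in 4 KB or 64 KB steps (see spinor_erase()), each step with
 * IRQs locked and 4-byte address mode entered as needed. With
 * CONFIG_NOR_SUSPEND_RESUME, erases smaller than 256 KB are suspended
 * right after being issued and completed via spinor_wait_finished() with
 * IRQs enabled, so XIP execution can continue in between.
 */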
_nor_fun int spinor_ram_erase(struct spinor_info *sni, unsigned int addr, int len)
{
	int ret = 0;
	uint32_t key;
	int erase_size = 0;
	uint32_t t0, t1, t2;
#ifdef CONFIG_NOR_SUSPEND_RESUME
	bool b_suspend = true;
#endif
#ifdef NOR_ERASE_CHECK
	unsigned int bak_len, bak_addr;

	bak_len = len;
	bak_addr = addr;
#endif

	//printk("nor_e:off=0x%x,len=0x%x\n", addr, len);
#ifdef CONFIG_NOR_SUSPEND_RESUME
	if (len >= SPINOR_ERASE_BLOCK_SIZE * 4) /* erasing 256 KB or more: skip suspend/resume */
		b_suspend = false;
	k_mutex_lock(&spinor_w_mutex, K_FOREVER);
	spinor_wait_finished(sni);
	if (b_suspend)
		sni->flag |= SPINOR_FLAG_NO_WAIT_READY;
#endif

	while (len > 0) {
		if (len < SPINOR_ERASE_BLOCK_SIZE) {
			erase_size = SPINOR_ERASE_SECTOR_SIZE;
		} else if (addr & SPINOR_ERASE_BLOCK_MASK) {
			erase_size = SPINOR_ERASE_SECTOR_SIZE;
		} else {
			erase_size = SPINOR_ERASE_BLOCK_SIZE;
		}

		key = irq_lock();
		t0 = k_cycle_get_32();
		if (addr >= NOR_3B_ADDR_MAXLEN)
			spinor_4byte_address_mode(sni, true);
		ret = spinor_erase(sni, addr, erase_size);
#ifdef CONFIG_NOR_SUSPEND_RESUME
		if (b_suspend)
			spinor_suspend(sni);
#endif
		if (addr >= NOR_3B_ADDR_MAXLEN)
			spinor_4byte_address_mode(sni, false);
		t1 = k_cycle_get_32();
#ifdef CONFIG_SPINOR_TEST_DELAYCHAIN
		soc_udelay(100000); /* delay-chain trial may fail while the NOR is still busy; wait for the erase to finish */
#endif
		irq_unlock(key);
#ifdef CONFIG_NOR_SUSPEND_RESUME
		if (b_suspend)
			spinor_wait_finished(sni);
#endif
		t2 = k_cycle_get_32();
		//printk("nor_e:off=0x%x,len=0x%x, tran=%d us, wait=%d\n", addr, erase_size,
		//	k_cyc_to_us_ceil32(t1 - t0), k_cyc_to_us_ceil32(t2 - t1));

		len -= erase_size;
		addr += erase_size;
	}
	soc_memctrl_cache_invalid();
#ifdef CONFIG_NOR_SUSPEND_RESUME
	sni->flag &= ~SPINOR_FLAG_NO_WAIT_READY;
	k_mutex_unlock(&spinor_w_mutex);
#endif
#ifdef NOR_ERASE_CHECK
	spinor_erase_chcek(sni, bak_addr, bak_len);
#endif
	return ret;
}
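
/*
 * Operation tables exported to the upper spi_flash layer. With
 * CONFIG_NOR_CODE_IN_RAM the main (4-byte address capable) flash uses
 * the RAM read/write/erase wrappers above, while the optional second
 * flash (CONFIG_SPI_FLASH_2) uses the plain 3-byte address operations;
 * without it, both tables use the plain spinor_* operations.
 */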
const struct spinor_operation_api spinor_4b_addr_op_api = {
	.read_chipid = spinor_read_chipid,
	.read_status = spinor_read_status,
	.write_status = spinor_write_status,
	.read = spinor_ram_read,
	.write = spinor_ram_write,
	.erase = spinor_ram_erase,
};

#if IS_ENABLED(CONFIG_SPI_FLASH_2)
const struct spinor_operation_api spinor_3_addr_op_api = {
	.read_chipid = spinor_read_chipid,
	.read_status = spinor_read_status,
	.write_status = spinor_write_status,
	.read = spinor_read,
	.write = spinor_write,
	.erase = spinor_erase,
};

const struct spinor_operation_api *spi3nor_get_api(void)
{
	return &spinor_3_addr_op_api;
}
#endif

#else /* !CONFIG_NOR_CODE_IN_RAM */

const struct spinor_operation_api spinor_4b_addr_op_api = {
	.read_chipid = spinor_read_chipid,
	.read_status = spinor_read_status,
	.write_status = spinor_write_status,
	.read = spinor_read,
	.write = spinor_write,
	.erase = spinor_erase,
};

#if IS_ENABLED(CONFIG_SPI_FLASH_2)
const struct spinor_operation_api *spi3nor_get_api(void)
{
	return &spinor_4b_addr_op_api;
}
#endif

#endif /* CONFIG_NOR_CODE_IN_RAM */