  1. /* NVS: non volatile storage in flash
  2. *
  3. * Copyright (c) 2018 Laczen
  4. *
  5. * SPDX-License-Identifier: Apache-2.0
  6. */
  7. #include <drivers/flash.h>
  8. #include <string.h>
  9. #include <errno.h>
  10. #include <inttypes.h>
  11. #include <fs/nvs.h>
  12. #include <sys/crc.h>
  13. #include "nvs_priv.h"
  14. #include <logging/log.h>
  15. LOG_MODULE_REGISTER(fs_nvs, CONFIG_NVS_LOG_LEVEL);
  16. /* basic routines */
  17. /* nvs_al_size returns size aligned to fs->write_block_size */
  18. static inline size_t nvs_al_size(struct nvs_fs *fs, size_t len)
  19. {
  20. uint8_t write_block_size = fs->flash_parameters->write_block_size;
  21. if (write_block_size <= 1U) {
  22. return len;
  23. }
  24. return (len + (write_block_size - 1U)) & ~(write_block_size - 1U);
  25. }
  26. /* end basic routines */
  27. /* flash routines */
/* basic aligned flash write to nvs address
 *
 * Writes the write-block-aligned leading part of @data straight from the
 * caller's buffer, then pads the remaining tail with the flash erase value
 * in a temporary buffer so only whole write blocks are ever programmed.
 */
static int nvs_flash_al_wrt(struct nvs_fs *fs, uint32_t addr, const void *data,
			    size_t len)
{
	const uint8_t *data8 = (const uint8_t *)data;
	int rc = 0;
	off_t offset;
	size_t blen;
	uint8_t buf[NVS_BLOCK_SIZE];

	if (!len) {
		/* Nothing to write, avoid changing the flash protection */
		return 0;
	}

	/* translate nvs address (sector index | in-sector offset) into an
	 * absolute flash offset
	 */
	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
	offset += addr & ADDR_OFFS_MASK;

	/* blen: len rounded down to a multiple of the write block size;
	 * assumes write_block_size is a power of two
	 */
	blen = len & ~(fs->flash_parameters->write_block_size - 1U);
	if (blen > 0) {
		rc = flash_write(fs->flash_device, offset, data8, blen);
		if (rc) {
			/* flash write error */
			goto end;
		}
		len -= blen;
		offset += blen;
		data8 += blen;
	}
	if (len) {
		/* unaligned tail: pad with the erase value so the unused
		 * bytes of the final block stay in the erased state
		 */
		memcpy(buf, data8, len);
		(void)memset(buf + len, fs->flash_parameters->erase_value,
			     fs->flash_parameters->write_block_size - len);
		rc = flash_write(fs->flash_device, offset, buf,
				 fs->flash_parameters->write_block_size);
	}
end:
	return rc;
}
  65. /* basic flash read from nvs address */
  66. static int nvs_flash_rd(struct nvs_fs *fs, uint32_t addr, void *data,
  67. size_t len)
  68. {
  69. int rc;
  70. off_t offset;
  71. offset = fs->offset;
  72. offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
  73. offset += addr & ADDR_OFFS_MASK;
  74. rc = flash_read(fs->flash_device, offset, data, len);
  75. return rc;
  76. }
  77. /* allocation entry write */
  78. static int nvs_flash_ate_wrt(struct nvs_fs *fs, const struct nvs_ate *entry)
  79. {
  80. int rc;
  81. rc = nvs_flash_al_wrt(fs, fs->ate_wra, entry,
  82. sizeof(struct nvs_ate));
  83. fs->ate_wra -= nvs_al_size(fs, sizeof(struct nvs_ate));
  84. return rc;
  85. }
  86. /* data write */
  87. static int nvs_flash_data_wrt(struct nvs_fs *fs, const void *data, size_t len)
  88. {
  89. int rc;
  90. rc = nvs_flash_al_wrt(fs, fs->data_wra, data, len);
  91. fs->data_wra += nvs_al_size(fs, len);
  92. return rc;
  93. }
  94. /* flash ate read */
  95. static int nvs_flash_ate_rd(struct nvs_fs *fs, uint32_t addr,
  96. struct nvs_ate *entry)
  97. {
  98. return nvs_flash_rd(fs, addr, entry, sizeof(struct nvs_ate));
  99. }
  100. /* end of basic flash routines */
  101. /* advanced flash routines */
  102. /* nvs_flash_block_cmp compares the data in flash at addr to data
  103. * in blocks of size NVS_BLOCK_SIZE aligned to fs->write_block_size
  104. * returns 0 if equal, 1 if not equal, errcode if error
  105. */
  106. static int nvs_flash_block_cmp(struct nvs_fs *fs, uint32_t addr, const void *data,
  107. size_t len)
  108. {
  109. const uint8_t *data8 = (const uint8_t *)data;
  110. int rc;
  111. size_t bytes_to_cmp, block_size;
  112. uint8_t buf[NVS_BLOCK_SIZE];
  113. block_size =
  114. NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
  115. while (len) {
  116. bytes_to_cmp = MIN(block_size, len);
  117. rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
  118. if (rc) {
  119. return rc;
  120. }
  121. rc = memcmp(data8, buf, bytes_to_cmp);
  122. if (rc) {
  123. return 1;
  124. }
  125. len -= bytes_to_cmp;
  126. addr += bytes_to_cmp;
  127. data8 += bytes_to_cmp;
  128. }
  129. return 0;
  130. }
  131. /* nvs_flash_cmp_const compares the data in flash at addr to a constant
  132. * value. returns 0 if all data in flash is equal to value, 1 if not equal,
  133. * errcode if error
  134. */
  135. static int nvs_flash_cmp_const(struct nvs_fs *fs, uint32_t addr, uint8_t value,
  136. size_t len)
  137. {
  138. int rc;
  139. size_t bytes_to_cmp, block_size;
  140. uint8_t cmp[NVS_BLOCK_SIZE];
  141. block_size =
  142. NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
  143. (void)memset(cmp, value, block_size);
  144. while (len) {
  145. bytes_to_cmp = MIN(block_size, len);
  146. rc = nvs_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);
  147. if (rc) {
  148. return rc;
  149. }
  150. len -= bytes_to_cmp;
  151. addr += bytes_to_cmp;
  152. }
  153. return 0;
  154. }
  155. /* flash block move: move a block at addr to the current data write location
  156. * and updates the data write location.
  157. */
  158. static int nvs_flash_block_move(struct nvs_fs *fs, uint32_t addr, size_t len)
  159. {
  160. int rc;
  161. size_t bytes_to_copy, block_size;
  162. uint8_t buf[NVS_BLOCK_SIZE];
  163. block_size =
  164. NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
  165. while (len) {
  166. bytes_to_copy = MIN(block_size, len);
  167. rc = nvs_flash_rd(fs, addr, buf, bytes_to_copy);
  168. if (rc) {
  169. return rc;
  170. }
  171. rc = nvs_flash_data_wrt(fs, buf, bytes_to_copy);
  172. if (rc) {
  173. return rc;
  174. }
  175. len -= bytes_to_copy;
  176. addr += bytes_to_copy;
  177. }
  178. return 0;
  179. }
  180. /* erase a sector and verify erase was OK.
  181. * return 0 if OK, errorcode on error.
  182. */
  183. static int nvs_flash_erase_sector(struct nvs_fs *fs, uint32_t addr)
  184. {
  185. int rc;
  186. off_t offset;
  187. addr &= ADDR_SECT_MASK;
  188. offset = fs->offset;
  189. offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
  190. LOG_DBG("Erasing flash at %lx, len %d", (long int) offset,
  191. fs->sector_size);
  192. rc = flash_erase(fs->flash_device, offset, fs->sector_size);
  193. if (rc) {
  194. return rc;
  195. }
  196. if (nvs_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value,
  197. fs->sector_size)) {
  198. rc = -ENXIO;
  199. }
  200. return rc;
  201. }
  202. /* crc update on allocation entry */
  203. static void nvs_ate_crc8_update(struct nvs_ate *entry)
  204. {
  205. uint8_t crc8;
  206. crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
  207. entry->crc8 = crc8;
  208. }
  209. /* crc check on allocation entry
  210. * returns 0 if OK, 1 on crc fail
  211. */
  212. static int nvs_ate_crc8_check(const struct nvs_ate *entry)
  213. {
  214. uint8_t crc8;
  215. crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
  216. if (crc8 == entry->crc8) {
  217. return 0;
  218. }
  219. return 1;
  220. }
  221. /* nvs_ate_cmp_const compares an ATE to a constant value. returns 0 if
  222. * the whole ATE is equal to value, 1 if not equal.
  223. */
  224. static int nvs_ate_cmp_const(const struct nvs_ate *entry, uint8_t value)
  225. {
  226. const uint8_t *data8 = (const uint8_t *)entry;
  227. int i;
  228. for (i = 0; i < sizeof(struct nvs_ate); i++) {
  229. if (data8[i] != value) {
  230. return 1;
  231. }
  232. }
  233. return 0;
  234. }
  235. /* nvs_ate_valid validates an ate:
  236. * return 1 if crc8 and offset valid,
  237. * 0 otherwise
  238. */
  239. static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
  240. {
  241. size_t ate_size;
  242. ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
  243. if ((nvs_ate_crc8_check(entry)) ||
  244. (entry->offset >= (fs->sector_size - ate_size))) {
  245. return 0;
  246. }
  247. return 1;
  248. }
  249. /* nvs_close_ate_valid validates an sector close ate: a valid sector close ate:
  250. * - valid ate
  251. * - len = 0 and id = 0xFFFF
  252. * - offset points to location at ate multiple from sector size
  253. * return 1 if valid, 0 otherwise
  254. */
  255. static int nvs_close_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
  256. {
  257. size_t ate_size;
  258. if ((!nvs_ate_valid(fs, entry)) || (entry->len != 0U) ||
  259. (entry->id != 0xFFFF)) {
  260. return 0;
  261. }
  262. ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
  263. if ((fs->sector_size - entry->offset) % ate_size) {
  264. return 0;
  265. }
  266. return 1;
  267. }
/* store an entry in flash
 *
 * Writes the data at the data write address, then writes the matching
 * allocation entry recording id, in-sector offset and length.
 */
static int nvs_flash_wrt_entry(struct nvs_fs *fs, uint16_t id, const void *data,
			       size_t len)
{
	int rc;
	struct nvs_ate entry;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	entry.id = id;
	/* data will be placed at the current data write address */
	entry.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
	entry.len = (uint16_t)len;
	entry.part = 0xff;

	nvs_ate_crc8_update(&entry);

	/* data is written before its ate: a power loss in between leaves no
	 * valid ate pointing at incomplete data
	 */
	rc = nvs_flash_data_wrt(fs, data, len);
	if (rc) {
		return rc;
	}
	rc = nvs_flash_ate_wrt(fs, &entry);
	if (rc) {
		return rc;
	}

	return 0;
}
  291. /* end of flash routines */
/* If the closing ate is invalid, its offset cannot be trusted and
 * the last valid ate of the sector should instead try to be recovered by going
 * through all ate's.
 *
 * addr should point to the faulty closing ate and will be updated to the last
 * valid ate. If no valid ate is found it will be left untouched.
 */
static int nvs_recover_last_ate(struct nvs_fs *fs, uint32_t *addr)
{
	uint32_t data_end_addr, ate_end_addr;
	struct nvs_ate end_ate;
	size_t ate_size;
	int rc;

	LOG_DBG("Recovering last ate from sector %d",
		(*addr >> ADDR_SECT_SHIFT));

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* skip the (invalid) closing ate itself */
	*addr -= ate_size;
	ate_end_addr = *addr;
	/* data grows upwards from the sector start; ates grow downwards */
	data_end_addr = *addr & ADDR_SECT_MASK;
	/* scan the ates downwards until they would collide with the data */
	while (ate_end_addr > data_end_addr) {
		rc = nvs_flash_ate_rd(fs, ate_end_addr, &end_ate);
		if (rc) {
			return rc;
		}
		if (nvs_ate_valid(fs, &end_ate)) {
			/* found a valid ate, update data_end_addr and *addr */
			data_end_addr &= ADDR_SECT_MASK;
			data_end_addr += end_ate.offset + end_ate.len;
			*addr = ate_end_addr;
		}
		ate_end_addr -= ate_size;
	}

	return 0;
}
/* walking through allocation entry list, from newest to oldest entries
 * read ate from addr, modify addr to the previous ate
 */
static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate)
{
	int rc;
	struct nvs_ate close_ate;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	rc = nvs_flash_ate_rd(fs, *addr, ate);
	if (rc) {
		return rc;
	}

	/* ates are stored top-down, so the previous (older) ate sits one
	 * ate size higher in the sector
	 */
	*addr += ate_size;
	if (((*addr) & ADDR_OFFS_MASK) != (fs->sector_size - ate_size)) {
		return 0;
	}

	/* last ate in sector, do jump to previous sector */
	if (((*addr) >> ADDR_SECT_SHIFT) == 0U) {
		*addr += ((fs->sector_count - 1) << ADDR_SECT_SHIFT);
	} else {
		*addr -= (1 << ADDR_SECT_SHIFT);
	}

	rc = nvs_flash_ate_rd(fs, *addr, &close_ate);
	if (rc) {
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	/* at the end of filesystem */
	if (!rc) {
		*addr = fs->ate_wra;
		return 0;
	}

	/* Update the address if the close ate is valid.
	 */
	if (nvs_close_ate_valid(fs, &close_ate)) {
		(*addr) &= ADDR_SECT_MASK;
		(*addr) += close_ate.offset;
		return 0;
	}

	/* The close_ate was invalid, let's find out the last valid ate
	 * and point the address to this found ate.
	 *
	 * remark: if there was absolutely no valid data in the sector *addr
	 * is kept at sector_end - 2*ate_size, the next read will contain
	 * invalid data and continue with a sector jump
	 */
	return nvs_recover_last_ate(fs, addr);
}
  375. static void nvs_sector_advance(struct nvs_fs *fs, uint32_t *addr)
  376. {
  377. *addr += (1 << ADDR_SECT_SHIFT);
  378. if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
  379. *addr -= (fs->sector_count << ADDR_SECT_SHIFT);
  380. }
  381. }
  382. /* allocation entry close (this closes the current sector) by writing offset
  383. * of last ate to the sector end.
  384. */
  385. static int nvs_sector_close(struct nvs_fs *fs)
  386. {
  387. int rc;
  388. struct nvs_ate close_ate;
  389. size_t ate_size;
  390. ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
  391. close_ate.id = 0xFFFF;
  392. close_ate.len = 0U;
  393. close_ate.offset = (uint16_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK);
  394. fs->ate_wra &= ADDR_SECT_MASK;
  395. fs->ate_wra += (fs->sector_size - ate_size);
  396. nvs_ate_crc8_update(&close_ate);
  397. rc = nvs_flash_ate_wrt(fs, &close_ate);
  398. nvs_sector_advance(fs, &fs->ate_wra);
  399. fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
  400. return 0;
  401. }
  402. static int nvs_add_gc_done_ate(struct nvs_fs *fs)
  403. {
  404. struct nvs_ate gc_done_ate;
  405. LOG_DBG("Adding gc done ate at %x", fs->ate_wra & ADDR_OFFS_MASK);
  406. gc_done_ate.id = 0xffff;
  407. gc_done_ate.len = 0U;
  408. gc_done_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
  409. nvs_ate_crc8_update(&gc_done_ate);
  410. return nvs_flash_ate_wrt(fs, &gc_done_ate);
  411. }
/* garbage collection: the address ate_wra has been updated to the new sector
 * that has just been started. The data to gc is in the sector after this new
 * sector.
 */
static int nvs_gc(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate close_ate, gc_ate, wlk_ate;
	uint32_t sec_addr, gc_addr, gc_prev_addr, wlk_addr, wlk_prev_addr,
	      data_addr, stop_addr;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* sec_addr: start of the sector to garbage collect */
	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);
	nvs_sector_advance(fs, &sec_addr);
	/* gc_addr: location of that sector's closing ate */
	gc_addr = sec_addr + fs->sector_size - ate_size;

	/* if the sector is not closed don't do gc */
	rc = nvs_flash_ate_rd(fs, gc_addr, &close_ate);
	if (rc < 0) {
		/* flash error */
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	if (!rc) {
		/* closing ate area still erased: sector was never closed */
		goto gc_done;
	}

	/* stop_addr: oldest ate slot of the sector to be considered */
	stop_addr = gc_addr - ate_size;

	if (nvs_close_ate_valid(fs, &close_ate)) {
		/* close ate tells us where the newest ate of the sector is */
		gc_addr &= ADDR_SECT_MASK;
		gc_addr += close_ate.offset;
	} else {
		/* corrupt close ate: scan for the newest valid ate instead */
		rc = nvs_recover_last_ate(fs, &gc_addr);
		if (rc) {
			return rc;
		}
	}

	do {
		gc_prev_addr = gc_addr;
		rc = nvs_prev_ate(fs, &gc_addr, &gc_ate);
		if (rc) {
			return rc;
		}

		if (!nvs_ate_valid(fs, &gc_ate)) {
			continue;
		}

		/* walk the whole fs from newest to oldest looking for a
		 * newer entry with the same id
		 */
		wlk_addr = fs->ate_wra;
		do {
			wlk_prev_addr = wlk_addr;
			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
			if (rc) {
				return rc;
			}
			/* if ate with same id is reached we might need to copy.
			 * only consider valid wlk_ate's. Something wrong might
			 * have been written that has the same ate but is
			 * invalid, don't consider these as a match.
			 */
			if ((wlk_ate.id == gc_ate.id) &&
			    (nvs_ate_valid(fs, &wlk_ate))) {
				break;
			}
		} while (wlk_addr != fs->ate_wra);

		/* if walk has reached the same address as gc_addr copy is
		 * needed unless it is a deleted item.
		 */
		if ((wlk_prev_addr == gc_prev_addr) && gc_ate.len) {
			/* copy needed */
			LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len);

			/* source data location in the sector being gc'ed */
			data_addr = (gc_prev_addr & ADDR_SECT_MASK);
			data_addr += gc_ate.offset;

			/* relocate the ate to the current data write address
			 * and recompute its crc before rewriting it
			 */
			gc_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
			nvs_ate_crc8_update(&gc_ate);

			rc = nvs_flash_block_move(fs, data_addr, gc_ate.len);
			if (rc) {
				return rc;
			}

			rc = nvs_flash_ate_wrt(fs, &gc_ate);
			if (rc) {
				return rc;
			}
		}
	} while (gc_prev_addr != stop_addr);

gc_done:

	/* Make it possible to detect that gc has finished by writing a
	 * gc done ate to the sector. In the field we might have nvs systems
	 * that do not have sufficient space to add this ate, so for these
	 * situations avoid adding the gc done ate.
	 */
	if (fs->ate_wra >= (fs->data_wra + ate_size)) {
		rc = nvs_add_gc_done_ate(fs);
		if (rc) {
			return rc;
		}
	}

	/* Erase the gc'ed sector */
	rc = nvs_flash_erase_sector(fs, sec_addr);
	if (rc) {
		return rc;
	}

	return 0;
}
/* nvs_startup: mount-time scan of the flash contents. Locates the most
 * recent open sector, recovers ate_wra/data_wra, detects and finishes an
 * interrupted garbage collection.
 */
static int nvs_startup(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate last_ate;
	size_t ate_size, empty_len;
	/* Initialize addr to 0 for the case fs->sector_count == 0. This
	 * should never happen as this is verified in nvs_init() but both
	 * Coverity and GCC believe the contrary.
	 */
	uint32_t addr = 0U;
	uint16_t i, closed_sectors = 0;
	uint8_t erase_value = fs->flash_parameters->erase_value;

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	/* step through the sectors to find a open sector following
	 * a closed sector, this is where NVS can to write.
	 */
	for (i = 0; i < fs->sector_count; i++) {
		/* addr: closing ate location of sector i */
		addr = (i << ADDR_SECT_SHIFT) +
		       (uint16_t)(fs->sector_size - ate_size);
		rc = nvs_flash_cmp_const(fs, addr, erase_value,
					 sizeof(struct nvs_ate));
		if (rc) {
			/* closed sector */
			closed_sectors++;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_cmp_const(fs, addr, erase_value,
						 sizeof(struct nvs_ate));
			if (!rc) {
				/* open sector */
				break;
			}
		}
	}
	/* all sectors are closed, this is not a nvs fs */
	if (closed_sectors == fs->sector_count) {
		rc = -EDEADLK;
		goto end;
	}

	if (i == fs->sector_count) {
		/* none of the sectors where closed, in most cases we can set
		 * the address to the first sector, except when there are only
		 * two sectors. Then we can only set it to the first sector if
		 * the last sector contains no ate's. So we check this first
		 */
		rc = nvs_flash_cmp_const(fs, addr - ate_size, erase_value,
				sizeof(struct nvs_ate));
		if (!rc) {
			/* empty ate */
			nvs_sector_advance(fs, &addr);
		}
	}

	/* addr contains address of closing ate in the most recent sector,
	 * search for the last valid ate using the recover_last_ate routine
	 */
	rc = nvs_recover_last_ate(fs, &addr);
	if (rc) {
		goto end;
	}

	/* addr contains address of the last valid ate in the most recent sector
	 * search for the first ate containing all cells erased, in the process
	 * also update fs->data_wra.
	 */
	fs->ate_wra = addr;
	fs->data_wra = addr & ADDR_SECT_MASK;

	while (fs->ate_wra >= fs->data_wra) {
		rc = nvs_flash_ate_rd(fs, fs->ate_wra, &last_ate);
		if (rc) {
			goto end;
		}

		rc = nvs_ate_cmp_const(&last_ate, erase_value);
		if (!rc) {
			/* found ff empty location */
			break;
		}

		if (nvs_ate_valid(fs, &last_ate)) {
			/* complete write of ate was performed */
			fs->data_wra = addr & ADDR_SECT_MASK;
			/* Align the data write address to the current
			 * write block size so that it is possible to write to
			 * the sector even if the block size has changed after
			 * a software upgrade (unless the physical ATE size
			 * will change).
			 */
			fs->data_wra += nvs_al_size(fs, last_ate.offset + last_ate.len);

			/* ate on the last position within the sector is
			 * reserved for deletion an entry
			 */
			if (fs->ate_wra == fs->data_wra && last_ate.len) {
				/* not a delete ate */
				rc = -ESPIPE;
				goto end;
			}
		}

		fs->ate_wra -= ate_size;
	}

	/* if the sector after the write sector is not empty gc was interrupted
	 * we might need to restart gc if it has not yet finished. Otherwise
	 * just erase the sector.
	 * When gc needs to be restarted, first erase the sector otherwise the
	 * data might not fit into the sector.
	 */
	addr = fs->ate_wra & ADDR_SECT_MASK;
	nvs_sector_advance(fs, &addr);
	rc = nvs_flash_cmp_const(fs, addr, erase_value, fs->sector_size);
	if (rc < 0) {
		goto end;
	}
	if (rc) {
		/* the sector after fs->ate_wrt is not empty, look for a marker
		 * (gc_done_ate) that indicates that gc was finished.
		 */
		bool gc_done_marker = false;
		struct nvs_ate gc_done_ate;

		addr = fs->ate_wra + ate_size;
		while ((addr & ADDR_OFFS_MASK) < (fs->sector_size - ate_size)) {
			rc = nvs_flash_ate_rd(fs, addr, &gc_done_ate);
			if (rc) {
				goto end;
			}
			/* a gc done marker is a valid ate with id 0xffff
			 * and len 0 (see nvs_add_gc_done_ate)
			 */
			if (nvs_ate_valid(fs, &gc_done_ate) &&
			    (gc_done_ate.id == 0xffff) &&
			    (gc_done_ate.len == 0U)) {
				gc_done_marker = true;
				break;
			}
			addr += ate_size;
		}

		if (gc_done_marker) {
			/* erase the next sector */
			LOG_INF("GC Done marker found");
			addr = fs->ate_wra & ADDR_SECT_MASK;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_erase_sector(fs, addr);
			goto end;
		}
		LOG_INF("No GC Done marker found: restarting gc");
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		/* restart gc with a fresh write sector */
		fs->ate_wra &= ADDR_SECT_MASK;
		fs->ate_wra += (fs->sector_size - 2 * ate_size);
		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
		rc = nvs_gc(fs);
		goto end;
	}

	/* possible data write after last ate write, update data_wra */
	while (fs->ate_wra > fs->data_wra) {
		empty_len = fs->ate_wra - fs->data_wra;

		rc = nvs_flash_cmp_const(fs, fs->data_wra, erase_value,
				empty_len);
		if (rc < 0) {
			goto end;
		}
		if (!rc) {
			break;
		}

		fs->data_wra += fs->flash_parameters->write_block_size;
	}

	/* If the ate_wra is pointing to the first ate write location in a
	 * sector and data_wra is not 0, erase the sector as it contains no
	 * valid data (this also avoids closing a sector without any data).
	 */
	if (((fs->ate_wra + 2 * ate_size) == fs->sector_size) &&
	    (fs->data_wra != (fs->ate_wra & ADDR_SECT_MASK))) {
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
	}

end:
	/* If the sector is empty add a gc done ate to avoid having insufficient
	 * space when doing gc.
	 */
	if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) ==
		      (fs->sector_size - 2 * ate_size))) {
		rc = nvs_add_gc_done_ate(fs);
	}
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}
  695. int nvs_clear(struct nvs_fs *fs)
  696. {
  697. int rc;
  698. uint32_t addr;
  699. if (!fs->ready) {
  700. LOG_ERR("NVS not initialized");
  701. return -EACCES;
  702. }
  703. for (uint16_t i = 0; i < fs->sector_count; i++) {
  704. addr = i << ADDR_SECT_SHIFT;
  705. rc = nvs_flash_erase_sector(fs, addr);
  706. if (rc) {
  707. return rc;
  708. }
  709. }
  710. /* nvs needs to be reinitialized after clearing */
  711. fs->ready = false;
  712. return 0;
  713. }
/* nvs_init: bind the flash device, validate the configuration against the
 * flash page layout, and mount the file system via nvs_startup().
 */
int nvs_init(struct nvs_fs *fs, const char *dev_name)
{
	int rc;
	struct flash_pages_info info;
	size_t write_block_size;

	k_mutex_init(&fs->nvs_lock);

	fs->flash_device = device_get_binding(dev_name);
	if (!fs->flash_device) {
		LOG_ERR("No valid flash device found");
		return -ENXIO;
	}

	fs->flash_parameters = flash_get_parameters(fs->flash_device);
	if (fs->flash_parameters == NULL) {
		LOG_ERR("Could not obtain flash parameters");
		return -EINVAL;
	}

	write_block_size = flash_get_write_block_size(fs->flash_device);

	/* check that the write block size is supported */
	if (write_block_size > NVS_BLOCK_SIZE || write_block_size == 0) {
		LOG_ERR("Unsupported write block size");
		return -EINVAL;
	}

	/* check that sector size is a multiple of pagesize */
	rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);
	if (rc) {
		LOG_ERR("Unable to get page info");
		return -EINVAL;
	}
	if (!fs->sector_size || fs->sector_size % info.size) {
		LOG_ERR("Invalid sector size");
		return -EINVAL;
	}

	/* check the number of sectors, it should be at least 2 */
	if (fs->sector_count < 2) {
		LOG_ERR("Configuration error - sector count");
		return -EINVAL;
	}

	rc = nvs_startup(fs);
	if (rc) {
		return rc;
	}

	/* nvs is ready for use */
	fs->ready = true;

	LOG_INF("%d Sectors of %d bytes", fs->sector_count, fs->sector_size);
	LOG_INF("alloc wra: %d, %x",
		(fs->ate_wra >> ADDR_SECT_SHIFT),
		(fs->ate_wra & ADDR_OFFS_MASK));
	LOG_INF("data wra: %d, %x",
		(fs->data_wra >> ADDR_SECT_SHIFT),
		(fs->data_wra & ADDR_OFFS_MASK));

	return 0;
}
/* nvs_write: store data for the given id.
 * Returns len on success, 0 when the write was skipped because the latest
 * stored entry (or delete marker) is identical, or a negative errno code.
 * len == 0 writes a delete marker (see nvs_delete).
 */
ssize_t nvs_write(struct nvs_fs *fs, uint16_t id, const void *data, size_t len)
{
	int rc, gc_count;
	size_t ate_size, data_size;
	struct nvs_ate wlk_ate;
	uint32_t wlk_addr, rd_addr;
	uint16_t required_space = 0U; /* no space, appropriate for delete ate */
	bool prev_found = false;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	data_size = nvs_al_size(fs, len);

	/* The maximum data size is sector size - 4 ate
	 * where: 1 ate for data, 1 ate for sector close, 1 ate for gc done,
	 * and 1 ate to always allow a delete.
	 */
	if ((len > (fs->sector_size - 4 * ate_size)) ||
	    ((len > 0) && (data == NULL))) {
		return -EINVAL;
	}

	/* find latest entry with same id */
	wlk_addr = fs->ate_wra;
	rd_addr = wlk_addr;

	while (1) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			return rc;
		}
		if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) {
			prev_found = true;
			break;
		}
		if (wlk_addr == fs->ate_wra) {
			/* walked the whole fs without finding a match */
			break;
		}
	}

	if (prev_found) {
		/* previous entry found */
		rd_addr &= ADDR_SECT_MASK;
		rd_addr += wlk_ate.offset;

		if (len == 0) {
			/* do not try to compare with empty data */
			if (wlk_ate.len == 0U) {
				/* skip delete entry as it is already the
				 * last one
				 */
				return 0;
			}
		} else if (len == wlk_ate.len) {
			/* do not try to compare if lengths are not equal */
			/* compare the data and if equal return 0 */
			rc = nvs_flash_block_cmp(fs, rd_addr, data, len);
			if (rc <= 0) {
				return rc;
			}
		}
	} else {
		/* skip delete entry for non-existing entry */
		if (len == 0) {
			return 0;
		}
	}

	/* calculate required space if the entry contains data */
	if (data_size) {
		/* Leave space for delete ate */
		required_space = data_size + ate_size;
	}

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	gc_count = 0;
	while (1) {
		if (gc_count == fs->sector_count) {
			/* gc'ed all sectors, no extra space will be created
			 * by extra gc.
			 */
			rc = -ENOSPC;
			goto end;
		}

		if (fs->ate_wra >= (fs->data_wra + required_space)) {
			rc = nvs_flash_wrt_entry(fs, id, data, len);
			if (rc) {
				goto end;
			}
			break;
		}

		/* not enough room in the current sector: close it, gc the
		 * next one and retry
		 */
		rc = nvs_sector_close(fs);
		if (rc) {
			goto end;
		}

		rc = nvs_gc(fs);
		if (rc) {
			goto end;
		}
		gc_count++;
	}
	rc = len;
end:
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}
  868. int nvs_delete(struct nvs_fs *fs, uint16_t id)
  869. {
  870. return nvs_write(fs, id, NULL, 0);
  871. }
/* nvs_read_hist: read the cnt'th newest value stored under id (cnt == 0 is
 * the latest). Returns the stored length on success — which may exceed len;
 * at most MIN(len, stored length) bytes are copied into data — -ENOENT when
 * no such entry exists, it is deleted, or there is not enough history, or
 * a negative errno code on flash error.
 */
ssize_t nvs_read_hist(struct nvs_fs *fs, uint16_t id, void *data, size_t len,
		      uint16_t cnt)
{
	int rc;
	uint32_t wlk_addr, rd_addr;
	uint16_t cnt_his;
	struct nvs_ate wlk_ate;
	size_t ate_size;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	if (len > (fs->sector_size - 2 * ate_size)) {
		return -EINVAL;
	}

	cnt_his = 0U;

	wlk_addr = fs->ate_wra;
	rd_addr = wlk_addr;

	/* walk from newest to oldest, counting matching entries until the
	 * requested history depth is exceeded
	 */
	while (cnt_his <= cnt) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			goto err;
		}
		if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) {
			cnt_his++;
		}
		if (wlk_addr == fs->ate_wra) {
			break;
		}
	}

	/* fail when: no match at all, the found entry is a delete marker
	 * (len == 0), or there was not enough history
	 */
	if (((wlk_addr == fs->ate_wra) && (wlk_ate.id != id)) ||
	    (wlk_ate.len == 0U) || (cnt_his < cnt)) {
		return -ENOENT;
	}

	rd_addr &= ADDR_SECT_MASK;
	rd_addr += wlk_ate.offset;
	rc = nvs_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len));
	if (rc) {
		goto err;
	}
	return wlk_ate.len;

err:
	return rc;
}
  918. ssize_t nvs_read(struct nvs_fs *fs, uint16_t id, void *data, size_t len)
  919. {
  920. int rc;
  921. rc = nvs_read_hist(fs, id, data, len, 0);
  922. return rc;
  923. }
/* nvs_calc_free_space: estimate the free space in bytes. Starts from the
 * usable capacity ((sector_count - 1) sectors, each minus one ate for the
 * close marker) and subtracts the aligned data size plus one ate for the
 * newest valid, non-deleted entry of every id.
 */
ssize_t nvs_calc_free_space(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate step_ate, wlk_ate;
	uint32_t step_addr, wlk_addr;
	size_t ate_size, free_space;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	free_space = 0;
	for (uint16_t i = 1; i < fs->sector_count; i++) {
		free_space += (fs->sector_size - ate_size);
	}

	step_addr = fs->ate_wra;

	while (1) {
		rc = nvs_prev_ate(fs, &step_addr, &step_ate);
		if (rc) {
			return rc;
		}

		/* inner walk: find the newest entry with the same id to
		 * decide whether step_ate is the current one
		 */
		wlk_addr = fs->ate_wra;

		while (1) {
			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
			if (rc) {
				return rc;
			}
			if ((wlk_ate.id == step_ate.id) ||
			    (wlk_addr == fs->ate_wra)) {
				break;
			}
		}

		/* only count the newest, valid, non-deleted entry */
		if ((wlk_addr == step_addr) && step_ate.len &&
		    (nvs_ate_valid(fs, &step_ate))) {
			/* count needed */
			free_space -= nvs_al_size(fs, step_ate.len);
			free_space -= ate_size;
		}

		if (step_addr == fs->ate_wra) {
			break;
		}
	}
	return free_space;
}