/* flash_blockdev_byte_access.c */
  1. /*
  2. * Copyright (c) 2018 Nordic Semiconductor ASA
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <device.h>
  7. #include <drivers/flash.h>
  8. #include <init.h>
  9. #include <kernel.h>
  10. #include <sys/util.h>
  11. #include <random/rand32.h>
  12. #include <stats/stats.h>
  13. #include <string.h>
  14. #include <logging/log.h>
  15. #include <board_cfg.h>
  16. LOG_MODULE_REGISTER(block_dev_acts, CONFIG_FLASH_LOG_LEVEL);
#define BLOCK_DEV_PAGE_SIZE 2048	/* bytes per cached page (flush/fill unit) */
#define BLOCK_DEV_SECTOR_SIZE 512	/* bytes per logical sector */
#define BLOCK_DEV_CACHE_SECT_NUM (BLOCK_DEV_PAGE_SIZE/BLOCK_DEV_SECTOR_SIZE)	/* sectors per page */
#define BLOCK_DEV_CACHE_NUM_ITEM 2	/* number of cache slots */
/*
 * One cache slot holding a whole flash page (BLOCK_DEV_PAGE_SIZE bytes).
 * cache_valid: cache_data mirrors the page starting at cache_start_sector.
 * write_valid: cache_data was modified and must be written back to flash.
 * seq_index:   allocation sequence number; the smallest value marks the
 *              oldest slot, which is the eviction victim.
 */
struct block_dev_cache_item {
	u16_t cache_valid:1;
	u16_t write_valid:1;
	u16_t reserved:14;
	u16_t seq_index;
	u32_t cache_start_sector;	/* first 512-byte sector covered by cache_data */
	u8_t cache_data[BLOCK_DEV_PAGE_SIZE];
};
/* Per-instance driver state: backing flash device plus the page cache. */
struct block_dev_flash_data{
	const struct device *flash_dev;	/* backing device bound at init */
	u16_t g_seq_index;		/* next allocation sequence number */
	struct block_dev_cache_item item[BLOCK_DEV_CACHE_NUM_ITEM];
};
/* Static configuration: name of the backing flash device to bind at init. */
struct block_dev_config {
	const char * dev_name;
};
/* Shorthand accessors for the Zephyr device's config/data pointers. */
#define DEV_CFG(dev) \
	((const struct block_dev_config *const)(dev)->config)
#define DEV_DATA(dev) \
	((struct block_dev_flash_data *)(dev)->data)
  41. static void _block_dev_cache_init(struct block_dev_flash_data *data)
  42. {
  43. int i;
  44. for(i = 0; i < BLOCK_DEV_CACHE_NUM_ITEM; i++) {
  45. data->item[i].cache_valid = 0;
  46. data->item[i].write_valid = 0;
  47. }
  48. }
  49. static int _block_dev_flush_cache(const struct device *dev, struct block_dev_cache_item *item)
  50. {
  51. int err;
  52. if(item->write_valid && item->cache_valid){
  53. item->write_valid = 0;
  54. LOG_DBG("nand flush startsec 0x%x, buf %p", item->cache_start_sector, item->cache_data);
  55. err = flash_write(dev, (item->cache_start_sector)<<9, item->cache_data, BLOCK_DEV_CACHE_SECT_NUM<<9);
  56. if (err < 0) {
  57. LOG_ERR("nand write error %d, offsec 0x%x, buf %p",
  58. err, item->cache_start_sector, item->cache_data);
  59. return -EIO;
  60. }
  61. }
  62. return 0;
  63. }
  64. static int _block_dev_update_cache(const struct device *dev, struct block_dev_cache_item *item, u32_t sec_off, u16_t *seq)
  65. {
  66. int err;
  67. u32_t page_off = sec_off/BLOCK_DEV_CACHE_SECT_NUM;
  68. item->cache_start_sector = page_off * BLOCK_DEV_CACHE_SECT_NUM;
  69. item->cache_valid = 1;
  70. item->write_valid = 0;
  71. item->seq_index = *seq;
  72. *seq = *seq + 1;
  73. LOG_DBG("nand update start_sec 0x%x, buf %p, sec_off=0x%x,0x%x",
  74. item->cache_start_sector, item->cache_data, sec_off, sec_off*BLOCK_DEV_SECTOR_SIZE);
  75. err = flash_read(dev, (item->cache_start_sector)<<9,item->cache_data, BLOCK_DEV_CACHE_SECT_NUM<<9);
  76. if (err < 0) {
  77. LOG_ERR("nand read error %d, offsec 0x%x, buf %p",
  78. err, item->cache_start_sector, item->cache_data);
  79. return -EIO;
  80. }
  81. return 0;
  82. }
  83. static struct block_dev_cache_item * _block_dev_find_cache_item(struct block_dev_flash_data *data, u32_t sec_off)
  84. {
  85. for (int i = 0; i < BLOCK_DEV_CACHE_NUM_ITEM; i++) {
  86. if (data->item[i].cache_valid && (data->item[i].cache_start_sector <= sec_off)
  87. && (sec_off < data->item[i].cache_start_sector+BLOCK_DEV_CACHE_SECT_NUM)) {
  88. return &data->item[i];
  89. }
  90. }
  91. //LOG_INF("sect_off 0x%x not in cache", sec_off);
  92. return NULL;
  93. }
/*
 * If a free (invalid) slot exists, use it; otherwise evict the oldest
 * valid slot (smallest seq_index).
 */
  97. static struct block_dev_cache_item * _block_dev_new_cache_item(struct block_dev_flash_data *data, u32_t sec_off)
  98. {
  99. int i;
  100. struct block_dev_cache_item *invalid, *valid, *new_item;
  101. valid = invalid = NULL;
  102. for(i = 0; i < BLOCK_DEV_CACHE_NUM_ITEM; i++) {
  103. if(data->item[i].cache_valid){
  104. if(valid == NULL){
  105. valid = &data->item[i];
  106. }else{
  107. if(valid->seq_index > data->item[i].seq_index)
  108. valid = &data->item[i];
  109. }
  110. }else{
  111. invalid = &data->item[i];
  112. break;
  113. }
  114. }
  115. if(invalid){
  116. new_item = invalid;
  117. }else{
  118. new_item = valid;
  119. }
  120. //LOG_INF("new item sect_off 0x%x, 0x%x is_write=%d", sec_off, sec_off*BLOCK_DEV_SECTOR_SIZE, is_write);
  121. _block_dev_flush_cache(data->flash_dev, new_item);
  122. _block_dev_update_cache(data->flash_dev, new_item, sec_off, &data->g_seq_index);
  123. return new_item;
  124. }
#ifndef CONFIG_BOARD_NANDBOOT
/* NOTE(review): compiled out via '#if 0' — retained for reference only.
 * Would flush every dirty cache slot to flash. */
#if 0
void nvram_storage_flush(struct block_dev_flash_data *data)
{
	int i;
	for(i = 0; i < BLOCK_DEV_CACHE_NUM_ITEM; i++)
		_block_dev_flush_cache(data->flash_dev, &data->item[i]);
}
#endif
#endif
  135. /*brwe = 0 (read), 1(write), 2(erase)*/
  136. static void block_dev_storage_read_write(struct block_dev_flash_data *data, uint64_t addr,
  137. uint8_t *buf, uint64_t size, int brwe)
  138. {
  139. int32_t wsize, len;
  140. uint32_t sector_off, addr_in_sec,cache_sec_index;
  141. static struct block_dev_cache_item * item;
  142. wsize = size;
  143. sector_off = addr / BLOCK_DEV_SECTOR_SIZE;
  144. addr_in_sec = addr - (sector_off*BLOCK_DEV_SECTOR_SIZE);
  145. //LOG_INF("sect_off 0x%x, size 0x%x iswrite %d", sector_off, size, brwe);
  146. while(wsize > 0) {
  147. item = _block_dev_find_cache_item(data, sector_off);
  148. if(item == NULL)
  149. item = _block_dev_new_cache_item(data, sector_off);
  150. cache_sec_index = sector_off -item->cache_start_sector;
  151. if(addr_in_sec){//not sector algin
  152. len = BLOCK_DEV_SECTOR_SIZE - addr_in_sec;
  153. if(len > wsize)
  154. len = wsize;
  155. if(brwe == 1)
  156. memcpy(&item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE+addr_in_sec],buf, len);
  157. else if(brwe == 0)
  158. memcpy(buf, &item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE+addr_in_sec], len);
  159. else
  160. memset(&item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE+addr_in_sec], 0xff, len);
  161. buf += len;
  162. wsize -= len;
  163. cache_sec_index++;
  164. addr_in_sec = 0; // next align
  165. }
  166. for( ; wsize && (cache_sec_index < BLOCK_DEV_CACHE_SECT_NUM); cache_sec_index++){
  167. if(wsize < BLOCK_DEV_SECTOR_SIZE)
  168. len = wsize;
  169. else
  170. len = BLOCK_DEV_SECTOR_SIZE;
  171. if(brwe == 1)
  172. memcpy(&item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE],buf, len);
  173. else if(brwe == 0)
  174. memcpy(buf, &item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE], len);
  175. else
  176. memset(&item->cache_data[cache_sec_index*BLOCK_DEV_SECTOR_SIZE], 0xff, len);
  177. buf += len;
  178. wsize -= len;
  179. }
  180. if(brwe)
  181. item->write_valid = 1; //dirty
  182. sector_off = item->cache_start_sector + BLOCK_DEV_CACHE_SECT_NUM;
  183. }
  184. }
  185. static int block_dev_storage_write(const struct device *dev, uint64_t offset,
  186. const void *data,
  187. uint64_t len)
  188. {
  189. struct block_dev_flash_data *dev_data = DEV_DATA(dev);
  190. block_dev_storage_read_write(dev_data, offset, (uint8_t *)data, len, 1);
  191. //LOG_INF("nand write end 0x%x len 0x%x", addr, size);
  192. return 0;
  193. }
  194. static int block_dev_storage_read(const struct device *dev, uint64_t offset,
  195. void *data,
  196. uint64_t len)
  197. {
  198. struct block_dev_flash_data *dev_data = DEV_DATA(dev);
  199. block_dev_storage_read_write(dev_data, offset, (uint8_t *)data, len, 0);
  200. //LOG_INF("nand read end 0x%x len 0x%x", addr, size);
  201. return 0;
  202. }
  203. static int block_dev_storage_erase(const struct device *dev, uint64_t offset,
  204. uint64_t size)
  205. {
  206. uint8_t buf[1]; // not use;
  207. struct block_dev_flash_data *dev_data = DEV_DATA(dev);
  208. block_dev_storage_read_write(dev_data, offset, buf, size, 2);
  209. //LOG_INF("nand erase end 0x%x len 0x%x", addr, size);
  210. return 0;
  211. }
/*
 * Parameters reported through the flash API.
 * NOTE(review): write_block_size is 0x1000 (4 KiB) while the cache flushes
 * in BLOCK_DEV_PAGE_SIZE (2 KiB) units — confirm this mismatch is intended.
 */
static const struct flash_parameters flash_blockdev_parameters = {
	.write_block_size = 0x1000,
	.erase_value = 0xff,
};
  216. static const struct flash_parameters *
  217. block_dev_storage_get_parameters(const struct device *dev)
  218. {
  219. ARG_UNUSED(dev);
  220. return &flash_blockdev_parameters;
  221. }
/* Flash driver API vtable: byte-granular access backed by the sector cache. */
static const struct flash_driver_api flash_block_dev_api = {
	.read = block_dev_storage_read,
	.write = block_dev_storage_write,
	.erase = block_dev_storage_erase,
	.get_parameters = block_dev_storage_get_parameters,
};
  228. int block_dev_pm_control(const struct device *dev, enum pm_device_action action)
  229. {
  230. #ifdef CONFIG_PM_DEVICE
  231. int i;
  232. int ret;
  233. int err;
  234. u8_t *buf;
  235. u32_t offset;
  236. struct block_dev_flash_data *data = DEV_DATA(dev);
  237. switch(action) {
  238. case PM_DEVICE_ACTION_TURN_OFF:
  239. for(i = 0; i < BLOCK_DEV_CACHE_NUM_ITEM; i++) {
  240. if (data->item[i].write_valid && data->item[i].cache_valid) {
  241. data->item[i].write_valid = 0;
  242. buf = data->item[i].cache_data;
  243. offset = data->item[i].cache_start_sector;
  244. LOG_DBG("nand flush startsec 0x%x, buf %p\n", offset, buf);
  245. err = flash_write(data->flash_dev, offset<<9, buf, BLOCK_DEV_CACHE_SECT_NUM<<9);
  246. if (err < 0) {
  247. LOG_ERR("nand write error %d, offsec 0x%x, buf %p.\n", err, offset, buf);
  248. return -EIO;
  249. }
  250. }
  251. }
  252. #ifdef CONFIG_SPINAND_ACTS
  253. flash_flush(data->flash_dev, 0);
  254. #endif
  255. break;
  256. default:
  257. ret = -EINVAL;
  258. }
  259. #endif
  260. return 0;
  261. }
  262. static int block_dev_flash_init(const struct device *dev)
  263. {
  264. struct block_dev_flash_data *data = DEV_DATA(dev);
  265. const struct block_dev_config *config = DEV_CFG(dev);
  266. data->flash_dev = device_get_binding(config->dev_name);
  267. if(!data->flash_dev){
  268. return -EINVAL;
  269. }
  270. _block_dev_cache_init(data);
  271. return 0;
  272. }
/* Single driver instance; the backing device is selected by Kconfig. */
static struct block_dev_flash_data block_dev_flash_acts_data;
static const struct block_dev_config block_dev_flash_acts_config = {
#ifdef CONFIG_SPINAND_ACTS
	.dev_name = CONFIG_SPINAND_FLASH_NAME,	/* SPI-NAND backend */
#elif defined(CONFIG_MMC_ACTS)
	.dev_name = CONFIG_SD_NAME,		/* SD/MMC backend */
#endif
};
/* Register after the kernel is up so the backing flash driver exists. */
DEVICE_DEFINE(block_dev_acts, CONFIG_BLOCK_DEV_FLASH_NAME, &block_dev_flash_init, block_dev_pm_control, &block_dev_flash_acts_data,
	&block_dev_flash_acts_config, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &flash_block_dev_api);