#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include <hal_timer.h>

#include "inter.h"

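/*
 * Write-back cache for SPI NOR flash.
 *
 * Writes and erases within one CACHE_SIZE-aligned window are staged in a
 * single RAM buffer and tracked by two bitmaps: bitmap_page marks dirty
 * pages, bitmap_blk marks blocks with a pending erase. The window is
 * flushed (erases first, then dirty pages) when an access falls outside
 * it or when nor_cache_sync() is called.
 */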
#define addr_to_blk(nor, addr) ((addr) / (nor)->blk_size)
#define addr_to_page(nor, addr) ((addr) / (nor)->page_size)

typedef struct {
#define CACHE_SIZE SZ_64K
	char *buf;
#define INVALID_CACHE_ADDR ((unsigned int)-1)
	unsigned int addr;
} cache_t;

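/*
 * Cache state: the current CACHE_SIZE window (cache), the page most
 * recently written into it (last_write), the dirty-page and pending-erase
 * bitmaps, and the backing flash device.
 */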
struct nor_cache {
	cache_t cache;
	unsigned int last_write;

	unsigned long *bitmap_page;
	unsigned int page_cnt;
	unsigned long *bitmap_blk;
	unsigned int blk_cnt;

	struct nor_flash *nor;
};
static struct nor_cache g_nor_cache;

hal_sem_t lock_nor_cache;

static inline int nor_lock_init(void)
{
	lock_nor_cache = hal_sem_create(1);
	if (!lock_nor_cache) {
		SPINOR_ERR("create hal_sem lock for nor_flash failed\n");
		return -1;
	}
	return 0;
}

static int nor_cache_lock(void)
{
	return hal_sem_wait(lock_nor_cache);
}

static int nor_cache_trylock(void)
{
	return hal_sem_trywait(lock_nor_cache);
}

static int nor_cache_unlock(void)
{
	return hal_sem_post(lock_nor_cache);
}

static void clear_cache(cache_t *c)
{
	SPINOR_DEBUG("clear cache addr 0x%x\n", c->addr);
	memset(c->buf, 0xFF, CACHE_SIZE);
	c->addr = INVALID_CACHE_ADDR;
}

static void delete_cache(cache_t *c)
{
	free(c->buf);
	c->buf = NULL;
	c->addr = 0;
}

static int init_cache(cache_t *c)
{
	c->buf = malloc(CACHE_SIZE);
	if (!c->buf)
		return -ENOMEM;

	clear_cache(c);
	return 0;
}

static int init_bitmap(struct nor_cache *nc)
{
	struct nor_flash *nor = nc->nor;

	nc->page_cnt = CACHE_SIZE / nor->page_size;
	nc->bitmap_page = malloc(BITS_TO_LONGS(nc->page_cnt) * sizeof(long));
	if (!nc->bitmap_page)
		return -ENOMEM;
	memset(nc->bitmap_page, 0, BITS_TO_LONGS(nc->page_cnt) * sizeof(long));

	nc->blk_cnt = CACHE_SIZE / nor->blk_size;
	nc->bitmap_blk = malloc(BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));
	if (!nc->bitmap_blk)
		goto free_page;
	memset(nc->bitmap_blk, 0, BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));

	return 0;
free_page:
	free(nc->bitmap_page);
	return -ENOMEM;
}


static void delete_bitmap(struct nor_cache *nc)
{
	free(nc->bitmap_blk);
	nc->bitmap_blk = NULL;

	free(nc->bitmap_page);
	nc->bitmap_page = NULL;
}

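/*
 * One-time setup: allocate the cache buffer and bitmaps for the given
 * flash device. Returns -EBUSY if the cache is already initialized.
 */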
int nor_cache_init(struct nor_flash *nor)
{
	int ret = -ENOMEM;
	struct nor_cache *nc = &g_nor_cache;

	/* already initialized */
	if (nc->cache.buf)
		return -EBUSY;

	ret = nor_lock_init();
	if (ret)
		return ret;

	nor_cache_lock();

	ret = init_cache(&nc->cache);
	if (ret)
		goto unlock;

	nc->nor = nor;
	ret = init_bitmap(nc);
	if (ret)
		goto free_cache;

	nor_cache_unlock();
	return 0;

	/*
	 * free_bitmap:
	 *	delete_bitmap(nc);
	 */
free_cache:
	delete_cache(&nc->cache);
unlock:
	nor_cache_unlock();
	return ret;
}

void nor_cache_exit(void)
{
	struct nor_cache *nc = &g_nor_cache;

	delete_cache(&nc->cache);
	delete_bitmap(nc);
}

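/*
 * Return 0 if [addr, addr + len) lies entirely inside the currently
 * cached CACHE_SIZE window, -1 otherwise.
 */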
static int check_cache_addr(cache_t *c, unsigned int addr, unsigned int len)
{
	if (c->addr == INVALID_CACHE_ADDR)
		return -1;
	/* start boundary */
	if (ALIGN_DOWN(addr, CACHE_SIZE) != c->addr)
		return -1;
	/* end boundary */
	if (ALIGN_DOWN(addr + len - 1, CACHE_SIZE) != c->addr)
		return -1;
	return 0;
}

static inline void set_cache_addr(cache_t *c, unsigned int addr)
{
	c->addr = ALIGN_DOWN(addr, CACHE_SIZE);
	SPINOR_DEBUG("set cache addr as 0x%x\n", c->addr);
}

static inline unsigned int get_addr_by_page(unsigned int page)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;

	return c->addr + page * nor->page_size;
}

static inline unsigned int get_addr_by_blk(unsigned int blk)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;

	return c->addr + blk * nor->blk_size;
}

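/*
 * Minimal bitmap helpers. They are not atomic; all callers are
 * serialized by lock_nor_cache.
 */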
static inline void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}

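/*
 * Issue the erases recorded in bitmap_blk. Runs of consecutively marked
 * blocks are merged into a single nor_erase() call.
 */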
static int nor_flush_erase(struct nor_cache *nc)
{
	struct nor_flash *nor = nc->nor;
	unsigned int start, end;
	int ret;

	for (end = start = 0; end < nc->blk_cnt; end++) {
		/* erase lazily so consecutive marked blocks merge into one erase */
		if (test_bit(end, nc->bitmap_blk))
			continue;
		/* no marked blocks pending, keep scanning */
		if (start == end) {
			start = end + 1;
			continue;
		}

		SPINOR_DEBUG("flush erase: addr 0x%x blk cnt %u\n",
			     get_addr_by_blk(start), end - start);
		ret = nor_erase(get_addr_by_blk(start), (end - start) * nor->blk_size);
		if (ret)
			return ret;

		start = end + 1;
	}

	if (start != end) {
		SPINOR_DEBUG("flush erase: addr 0x%x blk cnt %u\n",
			     get_addr_by_blk(start), end - start);
		ret = nor_erase(get_addr_by_blk(start), (end - start) * nor->blk_size);
		if (ret)
			return ret;
	}

	memset(nc->bitmap_blk, 0, BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));
	return 0;
}

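/*
 * Write back every page marked in bitmap_page. Iteration starts at
 * last_write + 1, so the most recently written page (metadata, on lfs)
 * reaches flash last.
 */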
static int nor_flush_write(struct nor_cache *nc)
{
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	unsigned int page, i;
	char *buf = c->buf;
	int ret;

	for (i = 0; i < nc->page_cnt; i++) {
		/* start with the page after last_write */
		page = (i + nc->last_write + 1) % nc->page_cnt;

		if (!test_bit(page, nc->bitmap_page))
			continue;

		SPINOR_DEBUG("flush write: addr 0x%x\n", get_addr_by_page(page));
		ret = nor_write(get_addr_by_page(page),
				buf + page * nor->page_size, nor->page_size);
		if (ret)
			return ret;
	}

	memset(nc->bitmap_page, 0, BITS_TO_LONGS(nc->page_cnt) * sizeof(long));
	return 0;
}

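/*
 * Flush the whole window: pending erases first, then dirty pages, then
 * reset the cache to the invalid state.
 */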
static int nor_flush_cache(struct nor_cache *nc)
{
	int ret;

	if (nc->cache.addr == INVALID_CACHE_ADDR)
		return 0;

	ret = nor_flush_erase(nc);
	if (ret)
		return ret;

	ret = nor_flush_write(nc);
	if (ret)
		return ret;

	clear_cache(&nc->cache);
	return 0;
}

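/*
 * Stage a page-aligned write in the cache. A request that cannot fit in
 * one CACHE_SIZE window bypasses the cache; a request outside the current
 * window flushes it first and re-homes the window at addr.
 */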
int nor_cache_write(unsigned int addr, char *buf, unsigned int len)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	char *pbuf;
	unsigned int page;
	int ret;

	SPINOR_DEBUG("try to write addr 0x%x with size %u\n", addr, len);

	if (addr % nor->page_size) {
		SPINOR_ERR("addr %u must be aligned to page size %u\n", addr, nor->page_size);
		return -EINVAL;
	}

	if (len % nor->page_size) {
		SPINOR_ERR("len %u must be aligned to page size %u\n", len, nor->page_size);
		return -EINVAL;
	}

	ret = nor_cache_lock();
	if (ret)
		return ret;

	/* if the request spans more than one cache window, write directly */
	if (len > CACHE_SIZE ||
	    (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;

		ret = nor_write(addr, buf, len);
		goto unlock;
	}

	ret = check_cache_addr(c, addr, len);
	if (ret) {
		SPINOR_DEBUG("write addr 0x%x len %u over cache addr 0x%x\n",
			     addr, len, c->addr);

		/* writing outside the cached window: flush it and start a new one */
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;

		/* ready a new cache window for the data */
		set_cache_addr(c, addr);
	}

	pbuf = c->buf + (addr - c->addr);
	page = addr_to_page(nor, addr - c->addr);
	while (len) {
		unsigned int size = min(len, nor->page_size);

		memcpy(pbuf, buf, size);

		SPINOR_DEBUG("write: mark page %d abs addr 0x%x\n", page,
			     get_addr_by_page(page));
		set_bit(page, nc->bitmap_page);

		/*
		 * The order in which pages are flush-written is pivotal. On
		 * lfs, the last page written before a sync is always metadata,
		 * and normal data must reach flash before that metadata. So we
		 * remember the last page written and flush it last.
		 */
		nc->last_write = page;

		page += 1;
		pbuf += size;
		buf += size;
		len -= size;
	}

	ret = 0;
unlock:
	nor_cache_unlock();
	return ret;
}

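/*
 * Page-aligned read. Data is served from the cache buffer where pages are
 * dirty, as 0xFF where a pending erase covers the block, and from flash
 * otherwise.
 */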
int nor_cache_read(unsigned int addr, char *buf, unsigned int len)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	char *pbuf;
	unsigned int page, blk;
	int ret;

	SPINOR_DEBUG("try to read addr 0x%x with size %u\n", addr, len);

	if (addr % nor->page_size) {
		SPINOR_ERR("addr %u must be aligned to page size %u\n", addr, nor->page_size);
		return -EINVAL;
	}

	if (len % nor->page_size) {
		SPINOR_ERR("len %u must be aligned to page size %u\n", len, nor->page_size);
		return -EINVAL;
	}

	ret = nor_cache_lock();
	if (ret)
		return ret;

	/* if the request spans more than one cache window, flush and read directly */
	if (len > CACHE_SIZE ||
	    (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;

		ret = nor_read(addr, buf, len);
		goto unlock;
	}

	ret = check_cache_addr(c, addr, len);
	if (ret) {
		ret = nor_read(addr, buf, len);
		goto unlock;
	}

	pbuf = c->buf + (addr - c->addr);
	page = addr_to_page(nor, addr - c->addr);
	while (len) {
		unsigned int size = min(len, nor->page_size);

		if (test_bit(page, nc->bitmap_page)) {
			SPINOR_DEBUG("read match cache page %d addr 0x%x\n", page,
				     get_addr_by_page(page));
			memcpy(buf, pbuf, size);
		} else {
			/* block containing the current page, not the first one */
			blk = addr_to_blk(nor, page * nor->page_size);
			if (test_bit(blk, nc->bitmap_blk)) {
				SPINOR_DEBUG("read match cache erase blk 0x%x addr 0x%x\n",
					     blk, get_addr_by_page(page));
				memset(buf, 0xFF, size);
			} else {
				SPINOR_DEBUG("read not match cache addr 0x%x\n",
					     get_addr_by_page(page));
				ret = nor_read(get_addr_by_page(page), buf, size);
				if (ret)
					goto unlock;
			}
		}

		page += 1;
		pbuf += size;
		buf += size;
		len -= size;
	}

	ret = 0;
unlock:
	nor_cache_unlock();
	return ret;
}

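/*
 * Best-effort sync: uses trylock so the caller never blocks. If the
 * cache is busy the sync is skipped and a nonzero value is returned.
 */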
int nor_cache_sync(void)
{
	struct nor_cache *nc = &g_nor_cache;
	int ret;

	SPINOR_DEBUG("try to sync nor cache\n");
	ret = nor_cache_trylock();
	if (ret) {
		SPINOR_DEBUG("trylock fail, skip sync nor cache\n");
		return ret;
	}

	ret = nor_flush_cache(nc);

	nor_cache_unlock();

	return ret ? -1 : 0;
}

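/*
 * Stage a block-aligned erase. Erases are only recorded in bitmap_blk
 * here; the actual nor_erase() happens at flush time, so neighbouring
 * blocks can be merged into one erase command.
 */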
int nor_cache_erase(unsigned int addr, unsigned int len)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	unsigned int blk;
	int ret;

	SPINOR_DEBUG("try to erase addr 0x%x with size %u\n", addr, len);

	if (addr % nor->blk_size) {
		SPINOR_ERR("addr %u must be aligned to blk size %u\n", addr, nor->blk_size);
		return -EINVAL;
	}

	if (len % nor->blk_size) {
		SPINOR_ERR("len %u must be aligned to blk size %u\n", len, nor->blk_size);
		return -EINVAL;
	}

	ret = nor_cache_lock();
	if (ret)
		return ret;

	/* if the request spans more than one cache window, erase directly */
	if (len > CACHE_SIZE ||
	    (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
		/* only the pending erase operations need flushing first */
		ret = nor_flush_erase(nc);
		if (ret)
			goto unlock;

		ret = nor_erase(addr, len);
		goto unlock;
	}

	ret = check_cache_addr(c, addr, len);
	if (ret) {
		SPINOR_DEBUG("erase addr 0x%x len %u over cache addr 0x%x\n",
			     addr, len, c->addr);

		/* erasing outside the cached window: flush it and start a new one */
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;

		/* ready a new cache window for the data */
		set_cache_addr(c, addr);
	}

	blk = addr_to_blk(nor, addr - c->addr);
	for (; len; len -= nor->blk_size, blk++) {
		SPINOR_DEBUG("erase: mark blk %d addr 0x%x\n", blk, get_addr_by_blk(blk));
		set_bit(blk, nc->bitmap_blk);
	}

	ret = 0;
unlock:
	nor_cache_unlock();
	return ret;
}