1 #define pr_fmt(fmt) "blkpart: " fmt
2
3 #include <stdio.h>
4 #include <stdint.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <blkpart.h>
10 #include <rtthread.h>
11 #include <rtdevice.h>
12
13 #include <drv_sdmmc.h>
14
15 #define MIN(a, b) ((a) > (b) ? (b) : (a))
16 #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
17 #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
18 #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
19
20 static struct blkpart *blk_head = NULL;
21
blkpart_del_list(struct blkpart * blk)22 void blkpart_del_list(struct blkpart *blk)
23 {
24 struct blkpart *pblk, *pre;
25
26 if (!blk_head)
27 {
28 return;
29 }
30
31 pblk = pre = blk_head;
32 for (pblk = blk_head; pblk; pre = pblk, pblk = pblk->next)
33 {
34 if (pblk == blk)
35 {
36 if (pblk == blk_head)
37 {
38 blk_head = NULL;
39 }
40 else
41 {
42 pre->next = pblk->next;
43 }
44 break;
45 }
46 }
47 }
48
blkpart_add_list(struct blkpart * blk)49 void blkpart_add_list(struct blkpart *blk)
50 {
51 struct blkpart *pblk, *pre;
52
53 blk->next = NULL;
54
55 if (!blk_head)
56 {
57 blk_head = blk;
58 return;
59 }
60
61 pblk = pre = blk_head;
62 while (pblk)
63 {
64 pre = pblk;
65 pblk = pblk->next;
66 }
67 pre->next = blk;
68 }
69
/**
 * del_blkpart() - remove a block device's partition set from the registry.
 * @blk: descriptor previously registered with blkpart_add_list(); NULL is
 *       tolerated and ignored.
 *
 * Cleanup: the old per-partition loop only took the address of each array
 * element and compared it with NULL — the address of an array element is
 * never NULL, so the loop had no effect and has been removed.
 */
void del_blkpart(struct blkpart *blk)
{
    if (!blk)
    {
        return;
    }

    blkpart_del_list(blk);
}
91
get_part_by_index(const char * blk_name,uint32_t index)92 struct part *get_part_by_index(const char *blk_name, uint32_t index)
93 {
94 struct blkpart *blk = blk_head;
95
96 for (blk = blk_head; blk; blk = blk->next)
97 {
98 if (!strcmp(blk_name, blk->name))
99 {
100 if (index == 0)
101 {
102 return &blk->root;
103 }
104 else if (index == PARTINDEX_THE_LAST)
105 {
106 return &blk->parts[blk->n_parts - 1];
107 }
108 else if (blk->n_parts >= index)
109 {
110 return &blk->parts[index - 1];
111 }
112 else
113 {
114 return NULL;
115 }
116 }
117 }
118 return NULL;
119 }
120
121 #ifdef CONFIG_BLKPART_SHOW_INFO_CMD
part_info_main(int argc,char ** argv)122 static int part_info_main(int argc, char **argv)
123 {
124 int i;
125 struct blkpart *blk;
126 struct part *part;
127
128 for (blk = blk_head; blk; blk = blk->next)
129 {
130 for (i = 0; i < blk->n_parts; i++)
131 {
132 part = &blk->parts[i];
133 printf("%s(%s): bytes 0x%llx off 0x%llx\n", part->name, part->devname,
134 part->bytes, part->off);
135 }
136 }
137
138 return 0;
139 }
140 FINSH_FUNCTION_EXPORT_CMD(part_info_main, __cmd_part_info, dump nor partitions);
141 #endif
142
get_part_by_name(const char * name)143 struct part *get_part_by_name(const char *name)
144 {
145 struct blkpart *blk;
146
147 if (!strncmp(name, "/dev/", sizeof("/dev/") - 1))
148 {
149 name += sizeof("/dev/") - 1;
150 }
151
152 for (blk = blk_head; blk; blk = blk->next)
153 {
154 int i;
155
156 for (i = 0; i < blk->n_parts; i++)
157 {
158 struct part *part = &blk->parts[i];
159
160 if (!strcmp(part->name, name))
161 {
162 return part;
163 }
164
165 if (!strcmp(part->devname, name))
166 {
167 return part;
168 }
169 }
170 }
171 return NULL;
172 }
173
get_blkpart_by_name(const char * name)174 struct blkpart *get_blkpart_by_name(const char *name)
175 {
176 struct blkpart *blk;
177
178 if (!name)
179 {
180 return blk_head;
181 }
182
183 for (blk = blk_head; blk; blk = blk->next)
184 {
185 if (!strcmp(blk->name, name))
186 {
187 return blk;
188 }
189 }
190 return NULL;
191 }
192
/**
 * part_read() - block-device read entry for a partition device.
 * @dev:    RT-Thread device handle; user_data points at the struct part.
 * @offset: read position in sectors (units of blk->blk_bytes).
 * @data:   destination buffer.
 * @size:   number of sectors to read.
 *
 * Converts the sector request to bytes, clamps it to the partition, then
 * reads unaligned head / aligned middle / unaligned tail through a one-page
 * bounce buffer.  Returns the number of sectors read, or a negative errno
 * scaled by blk_bytes on failure.
 */
rt_size_t part_read(rt_device_t dev, rt_off_t offset, void *data, rt_size_t size)
{
    if (size == 0)
    {
        return 0;
    }

    ssize_t ret, sz = 0;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    size *= blk->blk_bytes; /* sector to size */
    offset *= blk->blk_bytes;

    char *page_buf = NULL;

    if (offset >= part->bytes)
    {
        printf("read offset %lu over part size %lu\n", offset, part->bytes);
        return 0;
    }

    if (offset + size > part->bytes)
    {
        printf("read %s(%s) over limit: offset %lu + size %lu over %lu\n",
               part->name, part->devname, offset, size, part->bytes);
    }

    /* Clamp the request to the partition end. */
    size = MIN(part->bytes - offset, size);
    pr_debug("read %s(%s) off 0x%x size %lu\n", part->name, part->devname,
             offset, size);

    /* Translate from partition-relative to device-absolute byte offset. */
    offset += part->off;

    /* A bounce buffer is only needed when head or tail is unaligned. */
    if (offset % blk->page_bytes || size % blk->page_bytes)
    {
        page_buf = malloc(blk->page_bytes);
        if (!page_buf)
        {
            return -ENOMEM;
        }
        memset(page_buf, 0, blk->page_bytes);
    }

    /**
     * Step 1:
     * read the beginning data that not align to page size
     */
    if (offset % blk->page_bytes)
    {
        uint32_t addr, poff, len;

        addr = ALIGN_DOWN(offset, blk->page_bytes);
        poff = offset - addr;
        len = MIN(blk->page_bytes - poff, size);

        pr_debug("offset %lu not align %u, fix them before align read\n",
                 offset, blk->blk_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", addr);

        ret = rt_dev_read(spinor_dev, addr / blk->page_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        /* BUGFIX: exactly one page was read and ret is scaled by page_bytes,
         * so the success check must be against page_bytes (as in step 3
         * below), not blk_bytes. */
        if (ret != blk->page_bytes)
        {
            goto err;
        }

        pr_debug("step2: copy page data to buf with page offset 0x%x and len %u\n",
                 poff, len);
        memcpy(data, page_buf + poff, len);

        offset += len;
        data += len; /* void* arithmetic: GCC extension */
        sz += len;
        size -= len;
    }

    /**
     * Step 2:
     * read data that align to page size
     */
    while (size >= blk->page_bytes)
    {
        uint32_t len = (size / blk->page_bytes) * blk->page_bytes;

        /* NOTE(review): offset/len are divided by blk_bytes here while ret
         * is scaled by page_bytes; this is only consistent when
         * page_bytes == blk_bytes — confirm the device's sector unit. */
        ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, (char *)data, len / blk->blk_bytes);
        ret *= blk->page_bytes;
        if (ret != len)
        {
            goto err;
        }

        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 3:
     * read the last data that not align to page size
     */
    if (size)
    {
        pr_debug("last size %u not align %u, read them\n", size, blk->blk_bytes);

        pr_debug("step1: read page data from addr 0x%x\n", offset);
        ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        if (ret != blk->page_bytes)
        {
            goto err;
        }

        pr_debug("step2: copy page data to buf with page with len %u\n", size);
        memcpy(data, page_buf, size);
        sz += size;
    }

#ifdef DEBUG
    pr_debug("read data:\n");
    hexdump(data, sz);
#endif
    ret = 0;
    goto out;

err:
    pr_err("read failed - %d\n", (int)ret);
out:
    if (page_buf)
    {
        free(page_buf);
    }
    return ret ? ret / blk->blk_bytes : sz / blk->blk_bytes;
}
328
/* Write helper used when no erase cycle is required: forward straight to
 * the underlying device driver.  The blkpart argument is unused and exists
 * only so the signature matches do_erase_write_blk for the pwrite pointer. */
int do_write_without_erase(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
    (void)blk; /* kept for function-pointer compatibility */
    return rt_dev_write(dev, addr, buf, size);
}
/*
 * Erase the region covering [addr, addr + size) on @dev, then write @size
 * units from @buf.  Returns @size on success, -EIO on erase or write failure.
 * NOTE(review): the units of @addr/@size follow whatever _part_write passes
 * at the call sites (sector-based there) — confirm against the driver's
 * erase/write contract.
 */
static int do_erase_write_blk(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
#if 0
    /* The code is prepared for elmfat which mounted at spinor */
    int ret;
    uint8_t *read_buf;
    unsigned int align_addr = ALIGN_DOWN(addr, blk->blk_bytes);

    read_buf = malloc(blk->blk_bytes);
    if (!read_buf)
    {
        return -ENOMEM;
    }
    memset(read_buf, 0, blk->blk_bytes);

    ret = dev->read(dev, align_addr, read_buf, blk->blk_bytes);
    if (ret != blk->blk_bytes)
    {
        free(read_buf);
        return -EIO;
    }

    if (!(align_addr % blk->blk_bytes))
    {
        blk_dev_erase_t erase_sector;
        memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
        erase_sector.addr = align_addr;
        erase_sector.len = blk->blk_bytes;
        ret = rt_dev_control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
        if (ret)
        {
            free(read_buf);
            return ret;
        }
    }

    memcpy(read_buf + (addr - align_addr), buf, blk->page_bytes);

    ret = rt_dev_write(dev, align_addr, read_buf, blk->blk_bytes);
    free(read_buf);
    if (ret == blk->blk_bytes)
    {
        return blk->page_bytes;
    }
    else
    {
        return -EIO;
    }
#else
    int ret = -1;

    /* Describe the region to erase and hand it to the driver's ioctl. */
    blk_dev_erase_t erase_sector;
    memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
    erase_sector.addr = addr;
    erase_sector.len = size;
    ret = rt_dev_control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
    if (ret)
    {
        return -EIO;
    }

    /* Erase succeeded; the write must transfer exactly @size units. */
    ret = rt_dev_write(dev, addr, buf, size);
    if (ret == size)
    {
        return size;
    }
    else
    {
        return -EIO;
    }
#endif
}
/**
 * _part_write() - common write path for a partition device.
 * @dev:    RT-Thread device handle; user_data points at the struct part.
 * @offset: write position in sectors (units of blk->blk_bytes).
 * @data:   source buffer.
 * @size:   number of sectors to write.
 * @erase_before_write: non-zero to erase each block before writing it
 *                      (flash-style devices).
 *
 * Converts the sector request to bytes, clamps it to the partition, then
 * writes unaligned head / aligned middle / unaligned tail using a
 * one-block read-modify-write bounce buffer.  Returns sectors written,
 * or a negative errno scaled by blk_bytes on failure.
 */
rt_size_t _part_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size, int erase_before_write)
{
    ssize_t ret, sz = 0;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    char *blk_buf = NULL;
    int (*pwrite)(rt_device_t dev, struct blkpart * blk, uint32_t addr, uint32_t size, char *buf);

    if (size == 0)
    {
        return 0;
    }
    size *= blk->blk_bytes; /* sector to size */
    offset *= blk->blk_bytes;

    if (offset >= part->bytes)
    {
        printf("write offset %lu over part size %lu\n", offset, part->bytes);
        return 0;
    }

    if (offset + size > part->bytes)
    {
        printf("write %s(%s) over limit: offset %lu + size %lu over %lu\n",
               part->name, part->devname, offset, size, part->bytes);
    }

    /* Clamp to the partition end, then rebase to a device-absolute offset. */
    size = MIN(part->bytes - offset, size);
    pr_debug("write %s(%s) off 0x%x size %lu (erase %d)\n", part->name,
             part->devname, offset, size, erase_before_write);
    offset += part->off;

    /* A bounce buffer is only needed when head or tail is unaligned. */
    if (offset % blk->blk_bytes || size % blk->blk_bytes)
    {
        blk_buf = malloc(blk->blk_bytes);
        if (!blk_buf)
        {
            return -ENOMEM;
        }
        memset(blk_buf, 0, blk->blk_bytes);
    }

    if (erase_before_write)
    {
        pwrite = do_erase_write_blk;
    }
    else
    {
        pwrite = do_write_without_erase;
    }

    /**
     * Step 1:
     * write the beginning data that not align to block size
     * (read-modify-write of the first partial block)
     */
    if (offset % blk->blk_bytes)
    {
        uint32_t addr, poff, len;

        addr = ALIGN_DOWN(offset, blk->blk_bytes);
        poff = offset - addr;
        len = MIN(blk->blk_bytes - poff, size);

        pr_debug("offset %u not align %u, fix them before align write\n",
                 offset, blk->blk_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", addr);
        ret = rt_dev_read(spinor_dev, addr / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }

        /* addr must less or equal to address */
        pr_debug("step2: copy buf data to page data with page offset 0x%x and len %u\n",
                 poff, len);
        memcpy(blk_buf + poff, data, len);

        pr_debug("step3: flush the fixed page data\n");
        ret = pwrite(spinor_dev, blk, addr / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }

        offset += len;
        data += len; /* void* arithmetic: GCC extension */
        sz += len;
        size -= len;
    }

    /**
     * Step 2:
     * write data that align to block size
     */
    while (size >= blk->blk_bytes)
    {
        uint32_t len = (size / blk->blk_bytes) * blk->blk_bytes;
        ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, len / blk->blk_bytes, (char *)data);
        ret *= blk->blk_bytes;
        if (ret != len)
        {
            goto err;
        }

        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 3:
     * write the last data that not align to block size
     * (read-modify-write of the final partial block)
     */
    if (size)
    {
        pr_debug("last size %u not align %u, write them\n", size, blk->blk_bytes);

        pr_debug("step1: read page data from addr 0x%x\n", offset);
        /* BUGFIX: sizeof(blk->blk_bytes) is the size of the integer field
         * (typically 4), not the block size — zero the whole bounce buffer. */
        memset(blk_buf, 0x00, blk->blk_bytes);
        /* BUGFIX: pass a sector count like every other rt_dev_read call in
         * this file (length / blk_bytes) and scale the result back to bytes;
         * previously the raw byte count was passed as the count argument. */
        ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }

        pr_debug("step2: copy buf to page data with page with len %u\n", size);
        memcpy(blk_buf, data, size);

        pr_debug("step3: flush the fixed page data\n");
        ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }
        sz += size;
    }
#ifdef DEBUG
    pr_debug("write data:\n");
    hexdump(data, sz);
#endif
    ret = 0;
    goto out;

err:
    pr_err("write failed - %d\n", (int)ret);
out:

    if (blk_buf)
    {
        free(blk_buf);
    }
    return ret ? ret / blk->blk_bytes : sz / blk->blk_bytes;
}
/* Write entry for flash-style devices: erase each block before writing. */
rt_size_t part_erase_before_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
{
    const int erase_first = 1;

    return _part_write(dev, offset, data, size, erase_first);
}
558
/* Write entry for devices that need no erase cycle before writing. */
rt_size_t part_erase_without_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
{
    const int erase_first = 0;

    return _part_write(dev, offset, data, size, erase_first);
}
563
part_control(rt_device_t dev,int cmd,void * args)564 rt_err_t part_control(rt_device_t dev, int cmd, void *args)
565 {
566 rt_err_t ret = -1;
567 struct part *part = (struct part *)dev->user_data;
568 struct blkpart *blk = part->blk;
569 rt_device_t spinor_dev = blk->dev;
570 struct rt_device_blk_geometry *geometry = NULL;
571
572 blk_dev_erase_t *erase_sector = (blk_dev_erase_t *)args;
573
574 switch (cmd)
575 {
576 case DEVICE_PART_CMD_ERASE_SECTOR:
577 erase_sector = (blk_dev_erase_t *)(args);
578
579 if (erase_sector->addr + erase_sector->len > part->bytes)
580 {
581 printf("erase %s(%s) over limit: offset %u + size %u over %lu\n",
582 part->name, part->devname, erase_sector->addr, erase_sector->len, part->bytes);
583 }
584
585 erase_sector->len = MIN(part->bytes - erase_sector->addr, erase_sector->len);
586 erase_sector->addr = erase_sector->addr + part->off;
587
588 if (spinor_dev && rt_dev_has_control(spinor_dev))
589 {
590 ret = rt_dev_control(spinor_dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, erase_sector);
591 }
592 break;
593 case DEVICE_PART_CMD_GET_BLOCK_SIZE:
594 if (spinor_dev && rt_dev_has_control(spinor_dev))
595 {
596 ret = rt_dev_control(spinor_dev, BLOCK_DEVICE_CMD_GET_BLOCK_SIZE, args);
597 }
598 else
599 {
600 ret = -1;
601 }
602 break;
603 case DEVICE_PART_CMD_GET_TOTAL_SIZE:
604 *(unsigned int *)args = part->bytes;
605 ret = 0;
606 break;
607 case RT_DEVICE_CTRL_BLK_GETGEOME:
608 geometry = (struct rt_device_blk_geometry *)args;
609 memset(geometry, 0, sizeof(struct rt_device_blk_geometry));
610 if (spinor_dev && rt_dev_has_control(spinor_dev))
611 {
612 ret = rt_dev_control(spinor_dev, RT_DEVICE_CTRL_BLK_GETGEOME, args);
613 if (!ret)
614 {
615 geometry->sector_count = part->bytes / geometry->bytes_per_sector;
616 ret = 0;
617 }
618 }
619
620 break;
621 case RT_DEVICE_CTRL_BLK_ERASE:
622 ret = 0;
623 break;
624 default:
625 break;
626 }
627
628 return ret;
629 }
630
631