// SPDX-License-Identifier: GPL-2.0-only
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005  Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63
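
/*
 * On-flash layout of each erase unit, as little-endian 16-bit words:
 *
 *	word 0:		RFD_MAGIC
 *	word 1:		0xffff or 0xffc8 (purpose unknown)
 *	word 2:		0xffff (ignored)
 *	words 3..:	map[], one entry per data sector in the unit,
 *			holding the logical sector number stored there
 *			(SECTOR_ZERO stands in for logical sector 0),
 *			or SECTOR_FREE / SECTOR_DELETED.
 *
 * The remaining sectors of the unit hold the 512-byte data sectors.
 */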

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

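/*
 * Parse the cached header of erase unit 'block_no': record its state,
 * count free and used map entries, and fill part->sector_map[] with the
 * flash address of every logical sector found in the unit.
 */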
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

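/*
 * Work out the disk geometry from the erase unit size, allocate the
 * header cache, per-block state and sector map, then read the header of
 * every erase unit to build the logical-to-physical sector map.
 */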
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase unit has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
					      part->sector_count));
	if (!part->sector_map)
		goto err;

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

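/*
 * Read one 512-byte sector. Logical sectors with no mapping (never
 * written, or last written as all zeroes) read back as zeroes.
 */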
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

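/*
 * Erase one unit and, on success, write RFD_MAGIC back at its start so
 * the unit is recognized as formatted on the next scan. The block's
 * counters are reset to reflect an empty unit.
 */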
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else {
			part->blocks[block].state = BLOCK_OK;
		}
	}

	kfree(erase);

	return rc;
}

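/*
 * Relocate every live sector out of unit 'block_no' by reading it and
 * writing it back through rfd_ftl_writesect(), which places it in the
 * current writable unit. If the sector being replaced (*old_sector)
 * lives in this unit it is dropped rather than copied.
 */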
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

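/*
 * Pick the erase unit that is cheapest to reclaim: the one with the
 * fewest used sectors, biased by its erase count for simple wear
 * levelling. The reserved unit is never considered. Live sectors are
 * copied out through rfd_ftl_writesect(); marking their old copies
 * deleted then triggers the erase once the unit is empty. A unit with
 * no live data is erased directly.
 */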
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while any unit still has a free
		 * sector: letting deleted sectors accumulate means less
		 * live data has to be moved at reclaim time.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

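/*
 * Make sure there is an erase unit we can write to: look for one with
 * free sectors, reclaiming space if none is available, then load that
 * unit's header into the cache and make it the current block.
 */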
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

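/*
 * Overwrite a sector's map entry with SECTOR_DELETED (0x0000), which can
 * be programmed over any previous value on NOR flash without an erase.
 * The cached header is kept in sync, and the unit is erased once it
 * holds neither used nor free sectors.
 */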
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

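/*
 * Find a SECTOR_FREE slot in the cached header of the current block.
 * Scanning starts at the first slot that can still be free when slots
 * are handed out in order, so the search is usually a single probe.
 */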
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}

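/*
 * Write one sector's data into the current writable unit: pick a free
 * map slot, write the 512 bytes of data, then record the logical sector
 * number in both the on-flash map and the cached header.
 */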
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

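/*
 * Write one logical sector. An all-zero sector is not stored at all:
 * its mapping is dropped and subsequent reads return zeroes. In either
 * case the previous copy, if any, is marked deleted afterwards.
 */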
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

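/*
 * Discard simply drops the mapping for each sector and marks the old
 * copies deleted, so the space is recovered at the next reclaim (or by
 * an immediate erase if a unit becomes completely empty).
 */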
static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
			       unsigned long sector, unsigned int nr_sects)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long addr;
	int rc;

	while (nr_sects) {
		if (sector >= part->sector_count)
			return -EIO;

		addr = part->sector_map[sector];

		if (addr != -1) {
			rc = mark_sector_deleted(part, addr);
			if (rc)
				return rc;

			part->sector_map[sector] = -1;
		}

		sector++;
		nr_sects--;
	}

	return 0;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = container_of(dev, struct partition, mbd);

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

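/*
 * Claim an MTD device for the RFD translation layer. Only NOR flash and
 * RAM devices smaller than 4 GiB are considered; the block size defaults
 * to the device's erase unit size unless overridden by the block_size
 * module parameter. A device is registered only if a valid RFD header
 * is found.
 */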
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
	    mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev(&part->mbd))
			return;
	}
out:
	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
	del_mtd_blktrans_dev(&part->mbd);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.discard	= rfd_ftl_discardsect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

module_mtd_blktrans(rfd_ftl_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");