// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ftlnp.h"

#if INC_FTL_NDM
// Local Function Definitions

// format_ftl: Erase all non-free blocks
//
// Input: ftl = pointer to FTL control block
//
// Returns: 0 on success, -1 on error
//
static int format_ftl(FTLN ftl) {
  ui32 meta_block;

  // Get number of block that will hold the metapage.
  if (ftl->free_mpn == (ui32)-1)
    meta_block = FtlnLoWcFreeBlk(ftl);
  else
    meta_block = ftl->free_mpn / ftl->pgs_per_blk;
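  // If no free map page is currently assigned, the lowest-wear free block is chosen to hold the
  // metapage; otherwise the block containing the current free map page is reused.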

  // Write meta page, to indicate that format is in progress.
  memset(ftl->main_buf, 0xFF, ftl->page_size);
  if (FtlnMetaWr(ftl, CONT_FORMAT))
    return -1;

  // Erase all map blocks, mark all blocks free, and reset the FTL.
  return FtlnFormat(ftl, meta_block);
}

// set_high_wc: Set highest wear count and adjust wear offsets
//
// Inputs: ftl = pointer to FTL control block
//         high_b = block with new highest wear count
//         high_b_wc = new highest wear count
//
static void set_high_wc(FTLN ftl, ui32 high_b, ui32 high_b_wc) {
  ui32 b;

  // Highest wear count should only go up by one and new highest block
  // should have contained highest wear (0 'high_wc' lag) before.
  PfAssert(ftl->high_wc + 1 == high_b_wc && ftl->blk_wc_lag[high_b] == 0);

  // Loop over all other blocks adjusting their 'high_wc' lags.
  for (b = 0; b < ftl->num_blks; ++b)
    if (b != high_b) {
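      // Note: each entry of blk_wc_lag[] is a single byte, so a block's recorded lag saturates
      // at 0xFF.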
      if (ftl->blk_wc_lag[b] < 0xFF)
        ++ftl->blk_wc_lag[b];
#if FTLN_DEBUG
      else
        ++ftl->max_wc_over;

      // If new value, record maximum encountered wear lag.
      if (ftl->max_wc_lag < ftl->blk_wc_lag[b])
        ftl->max_wc_lag = ftl->blk_wc_lag[b];
#endif
    }

  // Update highest wear count.
  ftl->high_wc = high_b_wc;
}

// first_free_blk: Find the first free block, counting from block zero
//
// Input: ftl = pointer to FTL control block
//
// Returns: block number if successful, else (ui32)-1 if none free
//
static ui32 first_free_blk(CFTLN ftl) {
  ui32 b;

  // Search for first free block.
  for (b = 0;; ++b) {
    // Return error if no block is free.
    if (b == ftl->num_blks)
      return (ui32)FsError(ENOSPC);

    // If block is free, return its block number.
    if (IS_FREE(ftl->bdata[b]))
      return b;
  }
}

// Global Function Definitions

// FtlnReport: Callback function used by upper file system layer to
//             notify FTL of events
//
// Inputs: vol = FTL handle
//         msg = event
//         ... = additional arguments
//
// Returns: 0 or 1 (unformat()) for success, -1 on failure
//
int FtlnReport(void* vol, ui32 msg, ...) {
  FTLN ftl = vol;
  va_list ap;

  // Set errno and return -1 if fatal I/O error occurred.
  if (ftl->flags & FTLN_FATAL_ERR)
    return FsError(EIO);

  // Handle event passed down from file system layer.
  switch (msg) {
    case FS_UNFORMAT: {
      ui32 b;

      // Return error if volume is mounted.
      if (ftl->flags & FTLN_MOUNTED)
        return FsError(EEXIST);

      // Format volume. Return -1 if error.
      if (format_ftl(ftl))
        return -1;

      // Erase every unerased block. Return -1 if error.
      for (b = 0; b < ftl->num_blks; ++b)
        if ((ftl->bdata[b] & ERASED_BLK_FLAG) == FALSE)
          if (FtlnEraseBlk(ftl, b))
            return -1;

      // Delete volume (both FTL and FS). Free its memory. Volume is
      // unmounted, so nothing to flush. Return value can be ignored.
      FtlnDelVol(ftl);

      // Return '1' for success.
      return 1;
    }

    case FS_PAGE_SZ:
      return ftl->page_size;

    case FS_FAT_SECTS:
      return ftl->num_vsects;

    case FS_FORMAT:
    case FS_FORMAT_RESET_WC: {
#if INC_FAT_MBR
      // If there is an MBR present, save it. Return -1 if error.
      if (ftl->vol_frst_sect)
        if (FtlnRdSects(ftl->main_buf, 0, 1, ftl))
          return -1;
#endif

      // Format volume. Return -1 if error.
      if (format_ftl(ftl))
        return -1;

      // Check if we're to equalize the wear counts (for benchmarking).
      if (msg == FS_FORMAT_RESET_WC) {
        ui32 b, avg_lag = 0;

        // Compute average wear count and assign to every block.
        for (b = 0; b < ftl->num_blks; ++b) avg_lag += ftl->blk_wc_lag[b];
        avg_lag /= ftl->num_blks;
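        // Lowering 'high_wc' by the average lag and zeroing every lag leaves each block
        // reporting the same (average) wear count after the reset.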
        ftl->high_wc -= avg_lag;
        for (b = 0; b < ftl->num_blks; ++b) ftl->blk_wc_lag[b] = 0;
      }

#if INC_FAT_MBR
      // If there was an MBR present, rewrite it. Return -1 if error.
      if (ftl->vol_frst_sect)
        if (FtlnWrSects(ftl->main_buf, 0, 1, ftl))
          return -1;
#endif

      // Return success.
      return 0;
    }

    case FS_VCLEAN:
      return FtlnVclean(ftl);

    case FS_UNMOUNT:
      // Return error if not mounted.
      if ((ftl->flags & FTLN_MOUNTED) == FALSE)
        return FsError(ENOENT);

      // Clear the 'mounted' flag.
      ftl->flags &= ~FTLN_MOUNTED;
      // FALLTHROUGH

    case FS_SYNC: {
#if INC_FTL_PAGE_CACHE
      // If there is a volume page cache, save all dirty pages.
      if (ftl->vol_cache)
        if (FcFlush(ftl->vol_cache) == -1)
          return -1;
#endif

      // Prepare to write all dirty map cache pages. Return -1 if err.
      if (FtlnRecCheck(ftl, 0))
        return -1;

      // Save all dirty map pages to flash. Return -1 if error.
      if (ftlmcFlushMaps(ftl->map_cache))
        return -1;
      PfAssert(ftl->num_free_blks >= FTLN_MIN_FREE_BLKS);

#if INC_FTL_NDM_MLC
      // For MLC devices, advance free_vpn pointer so next volume page
      // write can't corrupt previously written valid page.
      FtlnMlcSafeFreeVpn(ftl);
#endif

      // If request was for sync, return success now.
      if (msg == FS_SYNC)
        return 0;

#if INC_ELIST
      // Check if there is no current erased-block list.
      if (ftl->elist_blk == (ui32)-1) {
        ui32 b, n;

        // Count the number of erased free blocks.
        for (n = b = 0; b < ftl->num_blks; ++b)
          if (IS_ERASED(ftl->bdata[b]))
            ++n;

        // Only write erased list if more than 1 block is erased.
        if (n > 1) {
          ui32 wc, *lp, prior_free_mpn;
          ui32* end = (ui32*)(ftl->main_buf + ftl->page_size);

          // Save free map page number and force elist writes to begin
          // on first page of a free map block.
          prior_free_mpn = ftl->free_mpn;
          ftl->free_mpn = (ui32)-1;

          // Set pointer to write first entry on page.
          lp = (ui32*)(ftl->main_buf + FTLN_META_DATA_BEG);
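          // Each entry is a pair of little-endian 32-bit values: the erased block's number
          // followed by its wear count. Unused space at the end of a page is filled with -1.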

          // Loop to find erased free blocks.
          for (b = 0;;) {
            if (IS_ERASED(ftl->bdata[b])) {
#if DEBUG_ELIST
              // Verify that this block is unwritten.
              FtlnCheckBlank(ftl, b);
#endif

              // Write block number and wear count of erased block.
              WR32_LE(b, lp);
              ++lp;
              wc = ftl->high_wc - ftl->blk_wc_lag[b];
              WR32_LE(wc, lp);
              ++lp;

              // If all blocks recorded, fill rest of page with -1.
              if (--n == 0)
                while (lp != end) {
                  WR32_LE(-1, lp);
                  ++lp;
                }

              // Check if page is full.
              if (lp == end) {
                // Write page of erased list data.
                if (FtlnMetaWr(ftl, ERASED_LIST))
                  return -1;

                // Break if all erased blocks have been recorded.
                if (n == 0)
                  break;

                // Reset pointer to write next entry on new page.
                lp = (ui32*)(ftl->main_buf + FTLN_META_DATA_BEG);

                // Assert not at block end. That requires 16B pages.
                PfAssert(ftl->free_mpn != (ui32)-1);
              }
            }

            // Check if no blocks left to test.
            if (++b == ftl->num_blks) {
              // If unwritten data in last page, write it now.
              if (lp != (ui32*)(ftl->main_buf + FTLN_META_DATA_BEG))
                if (FtlnMetaWr(ftl, ERASED_LIST))
                  return -1;

              // List is finished, break.
              break;
            }
          }

          // Save elist block number and restore free map page number.
          ftl->elist_blk = ftl->free_mpn / ftl->pgs_per_blk;
          ftl->bdata[ftl->elist_blk] = FREE_BLK_FLAG;
          ++ftl->num_free_blks;
          ftl->free_mpn = prior_free_mpn;
        }
      }
#endif // INC_ELIST

#if FTLN_DEBUG > 1
      // Display FTL statistics.
      FtlnStats(ftl);
      FtlnBlkStats(ftl);
#endif

      // Return success.
      return 0;
    }

    case FS_FLUSH_SECT: {
      ui32 vsn, mpn;

      // Use the va_arg mechanism to get virtual sector to be flushed.
      va_start(ap, msg);
      vsn = va_arg(ap, ui32);
      va_end(ap);

      // Check argument for validity.
      if (vsn > ftl->num_vsects)
        return -1;

#if INC_FAT_MBR
      // If cluster sector, ensure it is page aligned.
      if (vsn >= ftl->frst_clust_sect)
        vsn += ftl->clust_off;
#endif

#if INC_FTL_PAGE_CACHE
      // If volume pages are cached, flush page if present and dirty.
      if (ftl->vol_cache)
        if (ftlvcFlushPage(ftl->vol_cache, vsn / ftl->sects_per_page))
          return -1;
#endif

      // Figure out MPN this sector belongs to.
      mpn = (vsn / ftl->sects_per_page) / ftl->mappings_per_mpg;
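      // Each map page holds 'mappings_per_mpg' virtual-to-physical entries, so this selects the
      // map page covering the flushed sector's virtual page.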

      // Flush MPN from cache. Return -1 if error.
      if (ftlmcFlushPage(ftl->map_cache, mpn))
        return -1;

#if INC_FTL_NDM_MLC
      // For MLC devices, advance free_vpn pointer so next volume page
      // write can't corrupt previously written valid page.
      FtlnMlcSafeFreeVpn(ftl);
#endif

      // Return success.
      return 0;
    }

    case FS_MARK_UNUSED: {
      ui32 ppn, vsn, count, past_end, vpn;

      // Use va_arg mechanism to get the starting sector and number of
      // sectors to be invalidated.
      va_start(ap, msg);
      vsn = va_arg(ap, ui32);
      count = va_arg(ap, ui32);
      va_end(ap);

      // Check arguments for validity.
      if (vsn + count > ftl->num_vsects)
        return -1;

#if INC_FAT_MBR
      // Ensure cluster requests are page aligned.
      if (vsn >= ftl->frst_clust_sect)
        vsn += ftl->clust_off;
#endif

#if INC_SECT_FTL
      // If starting sector is not page aligned, move to next whole
      // page if any.
      if (vsn % ftl->sects_per_page) {
        ui32 round_off;

        // Compute page round off based on starting sector.
        round_off = ftl->sects_per_page - vsn % ftl->sects_per_page;
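        // Example (illustrative numbers): with 4 sectors per page and vsn = 6, round_off is 2,
        // so the request is trimmed to start at sector 8, the next page boundary.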

        // If request is for less than one full page, return -1.
        if (count < round_off)
          return -1;

        // Adjust count and starting sector to account for round off.
        count -= round_off;
        vsn += round_off;
      }

      // Ensure whole number of pages are marked dirty.
      count -= count % ftl->sects_per_page;
      if (count == 0)
        return -1;
#endif

      // Compute first and one past last page that will be dirty.
      vpn = vsn / ftl->sects_per_page;
      past_end = vpn + count / ftl->sects_per_page;

      // Mark page(s) unused in FTL.
      for (; vpn < past_end; ++vpn) {
        // Prepare to potentially write 1 map page. Return -1 if error.
        if (FtlnRecCheck(ftl, -1))
          return -1;

        // Retrieve physical page number for VPN. Return -1 if error.
        if (FtlnMapGetPpn(ftl, vpn, &ppn) < 0)
          return -1;

        // If unmapped, skip page.
        if (ppn == (ui32)-1)
          continue;

#if FS_ASSERT
        // Confirm no physical page number changes below.
        ftl->assert_no_recycle = TRUE;
#endif

        // Assign invalid value to VPN's physical page number and
        // decrement block's used page count.
        if (FtlnMapSetPpn(ftl, vpn, (ui32)-1))
          return -1;
        PfAssert(ftl->num_free_blks >= FTLN_MIN_FREE_BLKS);
        FtlnDecUsed(ftl, ppn, vpn);

#if FS_ASSERT
        // End check for no physical page number changes.
        ftl->assert_no_recycle = FALSE;
#endif

#if INC_FTL_PAGE_CACHE
        // If volume pages are cached, remove page entry if cached.
        if (ftl->vol_cache)
          FcRmvEntry(ftl->vol_cache, vpn);
#endif
      }

      // Return success.
      return 0;
    }

    case FS_VSTAT: {
      union vstat* buf;

      // Use the va_arg mechanism to get the vstat buffer.
      va_start(ap, msg);
      buf = (union vstat*)va_arg(ap, void*);
      va_end(ap);

      // Get the garbage level.
      buf->fat.garbage_level = FtlnGarbLvl(ftl);

      // Get TargetFTL-NDM RAM usage.
      ftl->stats.ram_used = sizeof(struct ftln) + ftl->num_map_pgs * sizeof(ui32) +
#if INC_SECT_FTL
                            2 * ftl->page_size + ftl->eb_size * ftl->pgs_per_blk +
#else
                            1 * ftl->page_size + ftl->eb_size * ftl->pgs_per_blk +
#endif
                            ftlmcRAM(ftl->map_cache) +
                            ftl->num_blks * (sizeof(ui32) + sizeof(ui8));
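      // The terms above account for the control block, the map page number (MPN) array, the
      // temporary page/spare buffers, the map cache, and the per-block bdata[]/wear-lag arrays
      // (see the debug breakdown below).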
#if INC_FTL_PAGE_CACHE
      if (ftl->vol_cache)
        ftl->stats.ram_used += FcRAM(ftl->vol_cache);
#endif
#if FTLN_DEBUG > 1
      printf("TargetFTL-NDM RAM usage:\n");
      printf(" - sizeof(Ftln) : %u\n", (int)sizeof(struct ftln));
      printf(" - tmp buffers : %u\n",
#if INC_SECT_FTL
             2 * ftl->page_size + ftl->eb_size * ftl->pgs_per_blk);
#else
             1 * ftl->page_size + ftl->eb_size * ftl->pgs_per_blk);
#endif
      printf(" - map pages : %u\n", ftl->num_map_pgs * 4);
      printf(" - map cache : %u\n", ftlmcRAM(ftl->map_cache));
#if INC_FTL_PAGE_CACHE
      if (ftl->vol_cache)
        printf(" - vol pg cache : %u\n", FcRAM(ftl->vol_cache));
#endif
      printf(" - bdata[] : %u\n", ftl->num_blks * (int)(sizeof(ui32) + sizeof(ui8)));
#endif

      // Record high wear count.
      ftl->stats.wear_count = ftl->high_wc;

      // Set TargetFTL-NDM driver call counts and reset internal ones.
      buf->fat.drvr_stats.ftl.ndm = ftl->stats;
      buf->fat.ftl_type = FTL_NDM;
      bzero(&ftl->stats, sizeof(ftl_ndm_stats));

      // Return success.
      return 0;
    }

    case FS_MOUNT:
      // Return error if already mounted. Else set mounted flag.
      if (ftl->flags & FTLN_MOUNTED)
        return FsError(EEXIST);
      ftl->flags |= FTLN_MOUNTED;

#if FTLN_DEBUG > 1
      // Display FTL statistics.
      FtlnStats(ftl);
      FtlnBlkStats(ftl);
#else
      printf("FTL: total blocks: %u, free blocks: %u\n", ftl->num_blks, ftl->num_free_blks);
#endif

      // Return success.
      return 0;
  }

  // Return success.
  return 0;
}

#if INC_FTL_NDM_MLC
// FtlnMlcSafeFreeVpn: For MLC devices, ensure free_vpn pointer is
//                     on a page whose pair is at a higher offset than the
//                     last non-free page
//
// Input: ftl = pointer to FTL control block
//
void FtlnMlcSafeFreeVpn(FTLN ftl) {
  // Only adjust MLC volumes for which volume free pointer is set.
  if ((ftl->type == NDM_MLC) && (ftl->free_vpn != (ui32)-1)) {
    ui32 pn = ndmPastPrevPair(ftl->ndm, ftl->free_vpn);

#if FTLN_DEBUG
    printf("FtlnMlcSafeFreeVpn: old free = %u, new free = %u\n", ftl->free_vpn, pn);
#endif
    ftl->free_vpn = pn;
  }
}
#endif // INC_FTL_NDM_MLC

#if INC_FAT_MBR
// FtlnSetClustSect1: Set frst_clust_sect from FAT boot sector
//
// Inputs: ftl = pointer to FTL control block
//         bpb = buffer with contents of FAT boot sector
//         format_req = TRUE when FAT format ongoing
//
// Returns: 0 on success, -1 on failure
//
// Note: Because a boot sector is optional, this routine does
//       not fail if a boot sector is not found. It only fails
//       if an allocation or I/O error occurs.
//
int FtlnSetClustSect1(FTLN ftl, const ui8* bpb, int format_req) {
  ui16 rsrvd_sects, num_fats, root_ents, root_sects;
  ui32 sects_per_fat, old_clust_off, ssize;
  int rc = 0;

  // Check the boot sector signature. Return 0 if not boot sector.
  if (bpb[510] != 0x55 || bpb[511] != 0xAA)
    return 0;

  // Check the first byte of the jump instruction for the boot code.
  if (bpb[0] != 0xEB && bpb[0] != 0xE9)
    return 0;

  // Check that the sector size is valid.
  ssize = RD16_LE(&bpb[11]);
  if (ssize != 512 && ssize != 1024 && ssize != 2048 && ssize != 4096)
    return 0;

  // Get the number of FAT reserved sectors.
  rsrvd_sects = RD16_LE(&bpb[14]);

  // Get the number of FAT tables.
  num_fats = bpb[16];

  // Get the size of a FAT table.
  sects_per_fat = RD16_LE(&bpb[22]);
  if (sects_per_fat == 0)
    sects_per_fat = RD32_LE(&bpb[36]);

  // Get the number of root entries and figure out root size.
  root_ents = RD16_LE(&bpb[17]);
  root_sects = (root_ents * 32 + FAT_SECT_SZ - 1) / FAT_SECT_SZ;
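  // Each FAT directory entry is 32 bytes, so this rounds the root directory area up to whole
  // sectors.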

  // Figure out where the first sector of first cluster is.
  ftl->frst_clust_sect = ftl->vol_frst_sect + num_fats * sects_per_fat + rsrvd_sects + root_sects;

  // Calculate offset needed to page-align the cluster sectors.
  old_clust_off = ftl->clust_off;
  ftl->clust_off = ftl->sects_per_page - (ftl->frst_clust_sect % ftl->sects_per_page);

  // If this is a FAT32 volume and the offset is changed during a
  // format request, need to clear the root directory cluster to
  // account for the change in offset.
  if (root_sects == 0 && old_clust_off != ftl->clust_off && format_req) {
    ui32 root_1st_clust, root_1st_sect;
    ui8 sects_per_clust, *clust_buf;

    // Retrieve cluster size.
    sects_per_clust = bpb[13];

    // Retrieve root cluster.
    root_1st_clust = RD32_LE(&bpb[44]);

    // Compute first sector for root cluster.
    root_1st_sect = (root_1st_clust - 2) * sects_per_clust + ftl->frst_clust_sect;
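    // FAT32 numbers data clusters starting at 2, hence the '- 2' above.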

    // Allocate buffer for cluster write. Return -1 if unable.
    clust_buf = FsCalloc(FAT_SECT_SZ * sects_per_clust, 1);
    if (clust_buf == NULL)
      rc = -1;

    // Else write 0's in the root cluster and then free buffer.
    else {
      rc = FtlnWrSects(clust_buf, root_1st_sect, sects_per_clust, ftl);
      FsFree(clust_buf);
    }
  }

#if FTLN_DEBUG
  printf("FtlnSetClustSect1: set to %u, offset = %u\n", ftl->frst_clust_sect, ftl->clust_off);
#endif

  // Return status.
  return rc;
}
#endif // INC_FAT_MBR

// FtlnEraseBlk: Erase a block, increment its wear count, and mark it
//               free and erased
//
// Inputs: ftl = pointer to FTL control block
//         b = block to erase
//
// Returns: 0 on success, -1 on error
//
int FtlnEraseBlk(FTLN ftl, ui32 b) {
  ui32 b_wc;

#if INC_ELIST
  // Check if list of erased blocks/wear counts exists.
  if (ftl->elist_blk != (ui32)-1) {
    ui32 eb = ftl->elist_blk;

    // Forget erased list block number.
    ftl->elist_blk = (ui32)-1;

    // If not this block, erase it - because its info is out-of-date.
    if (eb != b)
      if (FtlnEraseBlk(ftl, eb))
        return -1;
  }
#endif

  // Call driver to erase block. Return -1 if error.
  ++ftl->stats.erase_block;
  if (ftl->erase_block(ftl->start_pn + b * ftl->pgs_per_blk, ftl->ndm))
    return FtlnFatErr(ftl);

  // Increment block wear count and possibly adjust highest.
  b_wc = ftl->high_wc - ftl->blk_wc_lag[b] + 1;
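  // Wear counts are tracked as per-block lags below 'high_wc': erasing either raises the highest
  // count (when this block already had zero lag) or shrinks this block's lag by one.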
  if (ftl->high_wc < b_wc)
    set_high_wc(ftl, b, b_wc);
  else
    --ftl->blk_wc_lag[b];

  // If not free, increment free blocks count. Mark free and erased.
  if (IS_FREE(ftl->bdata[b]) == FALSE)
    ++ftl->num_free_blks;
  ftl->bdata[b] = FREE_BLK_FLAG | ERASED_BLK_FLAG;

  // Return success.
  return 0;
}

// FtlnLoWcFreeBlk: Find the free block with the lowest wear count
//
// Input: ftl = pointer to FTL control block
//
// Returns: block number if successful, else (ui32)-1 if none free
//
ui32 FtlnLoWcFreeBlk(CFTLN ftl) {
  ui32 b, free_b;

  // Search for first free block. Return error if no block is free.
  free_b = first_free_blk(ftl);
  if (free_b == (ui32)-1)
    return free_b;

  // Continue search. Want free block with lowest wear count.
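  // Note: a larger wear-count lag means a lower absolute wear count.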
  for (b = free_b + 1; b < ftl->num_blks; ++b)
    if (IS_FREE(ftl->bdata[b]) && (ftl->blk_wc_lag[b] > ftl->blk_wc_lag[free_b]))
      free_b = b;

  // Return block number.
  return free_b;
}

// FtlnHiWcFreeBlk: Find the free block with the highest wear count
//
// Input: ftl = pointer to FTL control block
//
// Returns: block number if successful, else (ui32)-1 if none free
//
ui32 FtlnHiWcFreeBlk(CFTLN ftl) {
  ui32 b, free_b;

  // Search for first free block. Return error if no block is free.
  free_b = first_free_blk(ftl);
  if (free_b == (ui32)-1)
    return free_b;

  // Continue search. Want free block with highest wear count.
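  // Note: a smaller wear-count lag means a higher absolute wear count.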
  for (b = free_b + 1; b < ftl->num_blks; ++b)
    if (IS_FREE(ftl->bdata[b]) && (ftl->blk_wc_lag[b] < ftl->blk_wc_lag[free_b]))
      free_b = b;

  // Return block number.
  return free_b;
}

// FtlnFormat: Erase all map blocks, mark all blocks free, and reset
//             the FTL (keeping wear offsets)
//
// Inputs: ftl = pointer to FTL control block
//         meta_block = number of block holding the metapage
//
// Returns: 0 on success, -1 on error
//
int FtlnFormat(FTLN ftl, ui32 meta_block) {
  ui32 b;

  PfAssert(meta_block < ftl->num_blks);
  // Erase all map blocks, except the one containing the metapage.
  for (b = 0; b < ftl->num_blks; ++b) {
    // Skip non-map blocks.
    if (!IS_MAP_BLK(ftl->bdata[b]))
      continue;

    // Skip block containing the metapage - this will be erased last.
    if (b == meta_block)
      continue;

    // Erase map block. Return -1 if error.
    if (FtlnEraseBlk(ftl, b))
      return -1;
  }

  // Erase the block holding the metapage: format finished!
  if (FtlnEraseBlk(ftl, meta_block))
    return -1;

  // Mark all non-erased blocks as free with zero read wear.
  for (b = 0; b < ftl->num_blks; ++b)
    if (!IS_FREE(ftl->bdata[b]))
      ftl->bdata[b] = FREE_BLK_FLAG;
  ftl->num_free_blks = ftl->num_blks;

  // Re-initialize volume state.
  FtlnStateRst(ftl);
  ftl->high_bc = 1;  // initial block count of unformatted volumes

#if FTLN_DEBUG
  // Display FTL statistics.
  FtlnBlkStats(ftl);
#endif

  // Return success.
  return 0;
}

// FtlnStateRst: Initialize volume state (except wear count offsets)
//
// Input: ftl = pointer to FTL control block
//
void FtlnStateRst(FTLN ftl) {
  int n;

  ftl->high_bc = 0;
  ftl->high_bc_mblk = ftl->resume_vblk = (ui32)-1;
  ftl->high_bc_mblk_po = 0;
  ftl->copy_end_found = FALSE;
  ftl->max_rc_blk = (ui32)-1;
  ftl->free_vpn = ftl->free_mpn = (ui32)-1;
#if INC_FAT_MBR
  ftl->frst_clust_sect = (ui32)-1;
#endif
#if INC_ELIST
  ftl->elist_blk = (ui32)-1;
#endif
  ftl->deferment = 0;
#if FTLN_DEBUG
  ftl->max_wc_lag = 0;
#endif
#if FS_ASSERT
  ftl->assert_no_recycle = FALSE;
#endif
  memset(ftl->spare_buf, 0xFF, ftl->pgs_per_blk * ftl->eb_size);
  for (n = 0; n < ftl->num_map_pgs; ++n) ftl->mpns[n] = (ui32)-1;
  ftlmcInit(ftl->map_cache);
#if INC_FTL_PAGE_CACHE
  if (ftl->vol_cache)
    FcReinit(ftl->vol_cache, ftl->page_size);
#endif
}

// FtlnDecUsed: Decrement block used count for page no longer in-use
//
// Inputs: ftl = pointer to FTL control block
//         pn = physical page number
//         vpn = virtual page number
//
void FtlnDecUsed(FTLN ftl, ui32 pn, ui32 vpn) {
  ui32 b = pn / ftl->pgs_per_blk;

  // Decrement block used count.
  PfAssert(NUM_USED(ftl->bdata[b]));
  PfAssert(!IS_FREE(ftl->bdata[b]));
  DEC_USED(ftl->bdata[b]);

#if FTLN_DEBUG
  // Read page spare area (exit if error) and assert VPNs match.
  ++ftl->stats.read_spare;
  if (ftl->read_spare(ftl->start_pn + pn, ftl->spare_buf, ftl->ndm) < 0)
    exit(errno);
  PfAssert(GET_SA_VPN(ftl->spare_buf) == vpn);
#endif
} //lint !e818

// FtlnFatErr: Process FTL-NDM fatal error
//
// Input: ftl = pointer to FTL control block
//
// Returns: -1
//
int FtlnFatErr(FTLN ftl) {
  ftl->flags |= FTLN_FATAL_ERR;
  return FsError(EIO);
}

#if FTLN_DEBUG
void Spaces(int num) {
  while (num-- > 0) putchar(' ');
}

// flush_bstat: Flush buffered statistics counts
//
// Inputs: ftl = pointer to FTL control block
//         b = block number of current block
//         type = "FREE", "MAP", or "VOLUME"
// In/Outputs: *blk0 = first consecutive block number or -1
//             *blke = end consecutive block number
//
static void flush_bstat(CFTLN ftl, int* blk0, int* blke, int b, char* type) {
  if (*blk0 == -1)
    *blk0 = *blke = b;
  else if (*blke + 1 == b)
    *blke = b;
  else {
    printf("B = %4u", *blk0);
    if (*blk0 == *blke) {
      printf(" - used = %2u, wc lag = %3d, rc = %8u", NUM_USED(ftl->bdata[*blk0]),
             ftl->blk_wc_lag[*blk0], GET_RC(ftl->bdata[*blk0]));
      printf(" - %s BLOCK\n", type);
    } else {
      printf("-%-4u", *blke);
      Spaces(37);
      printf("- %s BLOCKS\n", type);
    }
    *blk0 = *blke = b;
  }
}

// FtlnBlkStats: Debug function to display blocks statistics
//
// Input: ftl = pointer to FTL control block
//
void FtlnBlkStats(CFTLN ftl) {
  int b, free0 = -1, freee, vol0 = -1, vole;

  printf(
      "\nBLOCK STATS: %u blocks, %u pages per block, curr free "
      "blocks = %u\n",
      ftl->num_blks, ftl->pgs_per_blk, ftl->num_free_blks);

  // Loop over FTL blocks.
  for (b = 0; b < ftl->num_blks; ++b) {
    // Check if block is free.
    if (IS_FREE(ftl->bdata[b])) {
      flush_bstat(ftl, &vol0, &vole, -1, "VOLUME");
      flush_bstat(ftl, &free0, &freee, b, "FREE");
    }

    // Else check if map block.
    else if (IS_MAP_BLK(ftl->bdata[b])) {
      flush_bstat(ftl, &free0, &freee, -1, "FREE");
      flush_bstat(ftl, &vol0, &vole, -1, "VOLUME");
      printf("B = %4u - used = %2u, wc lag = %3d, rc = %8u - ", b, NUM_USED(ftl->bdata[b]),
             ftl->blk_wc_lag[b], GET_RC(ftl->bdata[b]));
      printf("MAP BLOCK\n");
    }

    // Else is volume block.
    else {
      flush_bstat(ftl, &free0, &freee, -1, "FREE");
#if FTLN_DEBUG <= 1
      flush_bstat(ftl, &vol0, &vole, b, "VOLUME");
#else
      printf("B = %4u - used = %2u, wc lag = %3d, rc = %8u - ", b, NUM_USED(ftl->bdata[b]),
             ftl->blk_wc_lag[b], GET_RC(ftl->bdata[b]));
      printf("VOLUME BLOCK\n");
#endif
    }
  }
  flush_bstat(ftl, &free0, &freee, -1, "FREE");
  flush_bstat(ftl, &vol0, &vole, -1, "VOLUME");
}
#endif // FTLN_DEBUG

#if FTLN_DEBUG > 1
// FtlnStats: Display FTL statistics
//
// Input: ftl = pointer to FTL control block
//
void FtlnStats(FTLN ftl) {
  ui32 b, n;

  printf("\nFTL STATS:\n");
  printf(" - # vol sects = %d\n", ftl->num_vsects);
  printf(" - # vol pages = %d\n", ftl->num_vpages);
#if INC_FAT_MBR
  printf(" - 1st_clust_sect = %d\n", ftl->frst_clust_sect);
  printf(" - clust_off = %d\n", ftl->clust_off);
  printf(" - vol_frst_sect = %d\n", ftl->vol_frst_sect);
#endif
  printf(" - # map pages = %d\n", ftl->num_map_pgs);
  printf(" - # free blocks = %d\n", ftl->num_free_blks);
  for (n = b = 0; b < ftl->num_blks; ++b)
    if (IS_ERASED(ftl->bdata[b]))
      ++n;
  printf(" - # erased blks = %d\n", n);
  printf(" - flags =");
  if (ftl->flags & FTLN_FAT_VOL)
    printf(" FTLN_FAT_VOL");
  if (ftl->flags & FTLN_XFS_VOL)
    printf(" FTLN_XFS_VOL");
  if (ftl->flags & FTLN_FATAL_ERR)
    printf(" FTLN_FATAL_ERR");
  if (ftl->flags & FTLN_MOUNTED)
    printf(" FTLN_MOUNTED");
  putchar('\n');
}
#endif // FTLN_DEBUG

#if DEBUG_ELIST
// FtlnCheckBlank: Ensure the specified block is blank
//
// Inputs: ftl = pointer to FTL control block
//         b = block number of block to check
//
void FtlnCheckBlank(FTLN ftl, ui32 b) {
  ui32 pn = b * ftl->pgs_per_blk;
  ui32 end = pn + ftl->pgs_per_blk;
  int rc;

  do {
    rc = ftl->page_check(pn, ftl->main_buf, ftl->spare_buf, ftl->ndm);
    if (rc != NDM_PAGE_ERASED)
      exit(EINVAL);
  } while (++pn < end);
}
#endif // DEBUG_ELIST

#endif // INC_FTL_NDM