// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ftlnp.h"

#if INC_FTL_NDM
// Type Definitions
typedef struct {
    ui32 first_ppn;  // first physical page number or -1
    ui32 run_cnt;    // number of staged page reads
    ui8* buf;        // pointer to output buffer
} StagedRd;
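
// A StagedRd batches a run of physically consecutive page reads so that
// they can be issued with a single driver read_pages() call. E.g., if
// successive virtual pages map to physical pages 100, 101, and 102 in
// one block, they are staged as first_ppn = 100, run_cnt = 3 and read
// together (page numbers are illustrative).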

// Local Function Definitions

#if INC_SECT_FTL
// partial_page_read: Read some sectors from a virtual page
//
//      Inputs: ftl = pointer to FTL control block
//              vpn = virtual page number
//              byte_offset = first sector's offset into page
//              byte_count = number of bytes to read from page
//              buf = pointer to where data is copied to
//
//     Returns: 0 on success, -1 on failure
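//
//        Note: With (for example) 512-byte sectors and a 4 KiB page,
//              reading sectors 2-5 of a page corresponds to
//              byte_offset = 1024 and byte_count = 2048 (geometry is
//              illustrative).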
//
static int partial_page_read(FTLN ftl, ui32 vpn, uint byte_offset, uint byte_count, ui8* buf) {
    ui32 pn;

#if INC_FTL_PAGE_CACHE
    // Check if there is a volume page cache.
    if (ftl->vol_cache) {
        FcEntry* cache_ent;

        // Attempt to retrieve page via cache. If error, return.
        cache_ent = ftlvcGetRdPage(ftl->vol_cache, vpn);
        if (cache_ent == (void*)-1)
            return -1;

        // If page cached, retrieve its contents and return success.
        if (cache_ent) {
            memcpy(buf, &cache_ent->data[byte_offset], byte_count);
            FcFreeEntry(ftl->vol_cache, &cache_ent);
            return 0;
        }
    }
#endif

    // Prepare to potentially write one map page. Return -1 if error.
    if (FtlnRecCheck(ftl, -1))
        return -1;

    // Convert the virtual page number to its physical page number.
    if (FtlnMapGetPpn(ftl, vpn, &pn) < 0)
        return -1;

    // If unmapped, output data values as for unwritten data.
    if (pn == (ui32)-1)
        memset(buf, 0xFF, byte_count);

    // Else read page from flash and copy relevant sectors from page.
    else {
        if (FtlnRdPage(ftl, pn, ftl->swap_page))
            return -1;
        memcpy(buf, &ftl->swap_page[byte_offset], byte_count);
    }

    // Return success.
    return 0;
}
#endif // INC_SECT_FTL

// flush_pending_reads: Read all pages that are pending
//
//      Inputs: ftl = pointer to FTL control block
//              staged = pointer to structure holding 1st page number,
//                       page count, and output buffer pointer
//
//     Returns: 0 on success, -1 on error
//
static int flush_pending_reads(FTLN ftl, StagedRd* staged) {
    int status;
    ui32* b_ptr;

    // Issue pending reads.
    ftl->stats.read_page += staged->run_cnt;
    status = ftl->read_pages(ftl->start_pn + staged->first_ppn, staged->run_cnt, staged->buf,
                             ftl->spare_buf, ftl->ndm);

    // Adjust data buffer pointer.
    staged->buf += staged->run_cnt * ftl->page_size;

    // Get handle on blocks[] entry and increment block wear count.
    b_ptr = &ftl->bdata[staged->first_ppn / ftl->pgs_per_blk];
    INC_RC(ftl, b_ptr, staged->run_cnt);

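    // Interpret the driver status (as handled below): 0 means success,
    // 1 means the data was read but its block should be recycled (e.g.
    // excessive ECC correction), and -2 means a fatal error; any other
    // nonzero value is passed back to the caller as a failure.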
    // Check if error was reported.
    if (status) {
        // If block needs to be recycled, set block read count to its max.
        if (status == 1) {
            SET_MAX_RC(ftl, b_ptr);
            status = 0;
        }

        // Else if fatal error, set errno and fatal I/O flag, return -1.
        else if (status == -2)
            return FtlnFatErr(ftl);
    }

    // Reset pending sequence and return status.
    staged->first_ppn = (ui32)-1;
    staged->run_cnt = 0;
    return status;
}

// read_sectors: Read consecutive virtual sectors from the FTL, staging
//              runs of whole-page reads and handling partial pages
//
//      Inputs: ftl = pointer to FTL control block
//              vsn = first virtual sector to read
//              count = number of consecutive sectors to read
//              data = pointer to where data is copied to
//
//     Returns: 0 on success, -1 on error
//
static int read_sectors(FTLN ftl, ui32 vsn, ui32 count, ui8* data) {
#if INC_SECT_FTL
    ui32 sect_cnt, sect_offset;
#endif
    ui32 vpn;
    StagedRd staged;

    // Initialize structure for staging deferred consecutive page reads.
    staged.buf = data;
    staged.run_cnt = 0;
    staged.first_ppn = (ui32)-1;

    // Set errno and return -1 if fatal I/O error occurred.
    if (ftl->flags & FTLN_FATAL_ERR)
        return FsError(EIO);

    // Get offset in page to first sector and its virtual page number.
    vpn = vsn / ftl->sects_per_page;
#if INC_SECT_FTL
    sect_offset = vsn % ftl->sects_per_page;

    // Determine the number of sectors to read from the first page.
    sect_cnt = ftl->sects_per_page - sect_offset;
    if (sect_cnt > count)
        sect_cnt = count;
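
    // E.g. with 4 sectors per page (illustrative geometry), vsn = 6
    // gives vpn = 1 and sect_offset = 2, so at most 2 sectors are read
    // from the first page.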

    // Check if skipping some sectors on first page.
    if (sect_cnt < ftl->sects_per_page) {
        uint byte_offset = sect_offset * ftl->sect_size;
        uint byte_count = sect_cnt * ftl->sect_size;

        // Perform partial read. If error return -1.
        if (partial_page_read(ftl, vpn, byte_offset, byte_count, staged.buf))
            return -1;

        // Adjust buffer pointer and counters for successful partial read.
        staged.buf += byte_count;
        ++vpn;
        count -= sect_cnt;
    }

    // Check if any whole page reads are left.
    if (count >= ftl->sects_per_page)
#endif
    {
        ui32 pn;

        // Loop to read whole pages.
        do {
            // Check if reads are staged and PPN lookup could cause recycle.
            if (staged.run_cnt) {
                // If the next PPN lookup could trigger a recycle, flush
                // the saved PPNs first: a recycle moves valid pages to a
                // new block, which would invalidate the staged run.
                if (FtlnRecNeeded(ftl, -1)) {
                    if (flush_pending_reads(ftl, &staged))
                        return -1;
                }

#if FS_ASSERT
                // Else confirm no physical page number changes due to recycle.
                else
                    ftl->assert_no_recycle = TRUE;
#endif
            }

            // Prepare to potentially write one map page. Return -1 if error.
            if (FtlnRecCheck(ftl, -1))
                return -1;

            // Convert the virtual page number to its physical page number.
            if (FtlnMapGetPpn(ftl, vpn, &pn) < 0)
                return -1;

#if FS_ASSERT
            // End check for no physical page number changes.
            ftl->assert_no_recycle = FALSE;
#endif

            // Check if page is unmapped.
            if (pn == (ui32)-1) {
                // Flush pending reads if any.
                if (staged.first_ppn != (ui32)-1)
                    if (flush_pending_reads(ftl, &staged))
                        return -1;

                // Fill page's sectors with the value for unwritten data and
                // advance buffer pointer.
                memset(staged.buf, 0xFF, ftl->page_size);
                staged.buf += ftl->page_size;
            }

            // Else have valid mapped page number.
            else {
#if INC_FTL_PAGE_CACHE
                FcEntry* cache_ent = NULL;

                // If there is a volume page cache, check if page is cached.
                if (ftl->vol_cache) {
                    // If page in cache, use cached version.
                    cache_ent = FcInCache(ftl->vol_cache, vpn);
                    if (cache_ent) {
                        // Flush pending reads, if any. Return -1 if error.
                        if (staged.first_ppn != (ui32)-1)
                            if (flush_pending_reads(ftl, &staged))
                                return -1;

                        // Copy page contents and advance data buffer.
                        memcpy(staged.buf, cache_ent->data, ftl->page_size);
                        staged.buf += ftl->page_size;
                    }
                }

                // Check if page is uncached.
                if (cache_ent == NULL)
#endif
                {
                    // If next in sequence and in same block, add page to list.
                    if ((staged.first_ppn + staged.run_cnt == pn) &&
                        (staged.first_ppn / ftl->pgs_per_blk == pn / ftl->pgs_per_blk))
                        ++staged.run_cnt;

                    // Else flush pending reads, if any, and start new list.
                    else {
                        if (staged.first_ppn != (ui32)-1)
                            if (flush_pending_reads(ftl, &staged))
                                return -1;
                        staged.first_ppn = pn;
                        staged.run_cnt = 1;
                    }
                }
            }

            // Adjust virtual page number and sector count.
            ++vpn;
            count -= ftl->sects_per_page;
        } while (count >= ftl->sects_per_page);

        // Flush pending reads if any.
        if (staged.first_ppn != (ui32)-1)
            if (flush_pending_reads(ftl, &staged))
                return -1;
    }

#if INC_SECT_FTL
    // Check if partial read is left.
    if (count)
        if (partial_page_read(ftl, vpn, 0, count * ftl->sect_size, staged.buf))
            return -1;
#endif

    // Return success.
    return 0;
}

// Global Function Definitions

#if INC_FTL_PAGE_CACHE
//   FtlnVpnRd: Cache function to read a volume page
//
//      Inputs: buffer = place to store read data
//              vpn = volume page number
//              vol_ptr = FTL handle
//
//     Returns: 0 on success, -1 on failure
//
int FtlnVpnRd(void* buffer, ui32 vpn, void* vol_ptr) {
    FTLN ftl = vol_ptr;

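    // A volume page read is one whole page: convert the volume page
    // number to its first virtual sector and read sects_per_page
    // sectors.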
    return read_sectors(ftl, vpn * ftl->sects_per_page, ftl->sects_per_page, buffer);
}
#endif

// FtlnRdSects: Read a specified number of virtual sectors from the FTL
//
//      Inputs: buffer = place to store data bytes from read sectors
//              sect = first sector to read from
//              count = number of consecutive sectors to read
//              vol = FTL handle
//
//     Returns: 0 on success, -1 on failure
//
int FtlnRdSects(void* buffer, ui32 sect, int count, void* vol) {
    FTLN ftl = vol;

    // Ensure request is within volume's range of provided sectors.
    if (sect + count > ftl->num_vsects)
        return FsError(ENOSPC);

    // If no sectors to read, return success.
    if (count == 0)
        return 0;

    // If at least one block has reached its maximum read count, recycle.
    if (ftl->max_rc_blk != (ui32)-1)
        if (FtlnRecCheck(ftl, 0))
            return -1;

#if INC_FAT_MBR
    // Ensure all cluster requests are page aligned.
    if (sect >= ftl->frst_clust_sect)
        sect += ftl->clust_off;
#endif

    // Read sectors and return status.
    return read_sectors(ftl, sect, count, buffer);
}
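
// A minimal usage sketch (hypothetical: 'vol' is an FTL volume handle
// obtained at volume setup, and SECT_SIZE matches the volume's sector
// size):
//
//     ui8 buf[8 * SECT_SIZE];
//
//     if (FtlnRdSects(buf, 0, 8, vol))  // read sectors 0 through 7
//         handle_error();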

//  FtlnMapRd: Read an MPN from flash - used by MPN cache
//
//      Inputs: vol = FTL handle
//              mpn = map page to read
//              buf = buffer to hold contents of map page
//      Output: *unmapped = TRUE iff page is unmapped
//
//     Returns: 0 on success, -1 on error
//
int FtlnMapRd(void* vol, ui32 mpn, void* buf, int* unmapped) {
    FTLN ftl = vol;
    ui32 ppn;

    // Sanity check that map page index is valid and not the meta page.
    PfAssert(mpn < ftl->num_map_pgs - 1);

    // Retrieve physical map page number from MPNs array, if available.
    // Else output 0xFF's, set unmapped flag, and return success.
    ppn = ftl->mpns[mpn];
    if (ppn == (ui32)-1) {
        memset(buf, 0xFF, ftl->page_size);
        if (unmapped)
            *unmapped = TRUE;
        return 0;
    }

    // If output pointer provided, mark page as mapped.
    if (unmapped)
        *unmapped = FALSE;

    // Read page from flash and return status.
    return FtlnRdPage(ftl, ppn, buf);
}

//  FtlnRdPage: Read one page from flash and check return status
//
//      Inputs: ftl = pointer to FTL control block
//              ppn = physical page number of page to read from flash
//              rd_buf = buffer to hold read contents
//
//     Returns: 0 on success, -1 on error
//
int FtlnRdPage(FTLN ftl, ui32 ppn, void* rd_buf) {
    int status;
    ui32* b_ptr;

    // Set errno and return -1 if fatal I/O error occurred.
    if (ftl->flags & FTLN_FATAL_ERR)
        return FsError(EIO);

    // Read page from flash. If error, set errno/fatal flag/return -1.
    ++ftl->stats.read_page;
    status = ftl->read_pages(ftl->start_pn + ppn, 1, rd_buf, ftl->spare_buf, ftl->ndm);
    if (status < 0)
        return FtlnFatErr(ftl);

    // Get handle on block entry in blocks[].
    b_ptr = &ftl->bdata[ppn / ftl->pgs_per_blk];

    // If recycle requested, set read count to max. Else increment it.
    if (status)
        SET_MAX_RC(ftl, b_ptr);
    else
        INC_RC(ftl, b_ptr, 1);

    // Return success.
    return 0;
}
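
// Note: the per-block read counts maintained above guard against read
// disturb: once a block reaches its maximum read count, it is scheduled
// for recycle (see FtlnRdSects), which rewrites its data elsewhere.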
#endif // INC_FTL_NDM