// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <sys.h>
#include "ftl_mc.h"
#include <fsprivate.h>

// Configuration
#define MC_DEBUG FALSE

// Symbol Definitions
// States allowed for a cached MPN
#define FTLMC_CLEAN 0
#define FTLMC_DIRTY 1

// Type Declarations
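// Each cache entry sits on the cache-wide circular LRU list via 'lru_link'
// and, while it holds a valid map page, on a doubly linked hash bucket
// selected by hashing its map page number.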
struct ftlmc_cache_entry {
    CircLink lru_link;     // least recently used list entry
    ftlmcEntry* next_hash; // hash bucket list is a doubly linked list
    ftlmcEntry* prev_hash;
    ftlmcEntry** hash_head; // pointer to head of hash table list
    ui32* data;             // pointer to map page contents
    ui32 mpn;               // map page number
    ui16 status;            // status of page - dirty/clean
};

// Local Function Definitions

//        hash: Hash based on the page number
//
//      Inputs: mpn = map page number to be hashed
//              num_mpgs = size of hash table in number of map pages
//
//     Returns: Index into the hash table where value gets hashed
//
static ui32 hash(ui32 mpn, ui32 num_mpgs) {
    return (19823u * mpn + 321043U) % num_mpgs;
}

#if MC_DEBUG
// check_cache: Debug function to check cache consistency
//
static void check_cache(FTLMC* cache, ui32 mpn) {
    int count = 0;
    ftlmcEntry* entry;
    CircLink* link;

    // Loop over map page cache LRU list.
    for (link = CIRC_LIST_HEAD(&cache->lru_list);; link = link->next_bck) {
        // Break at list end.
        if (CIRC_LIST_AT_END(link, &cache->lru_list))
            break;

        // Convert 'lru_link' pointer to entry pointer.
        entry = (ftlmcEntry*)((ui8*)link - offsetof(ftlmcEntry, lru_link));

        // Check if entry is used.
        if (entry->mpn != (ui32)-1) {
            if (entry->hash_head != &cache->hash_tbl[hash(entry->mpn, cache->num_mpgs)]) {
                printf("\nFTL MAP CACHE: mpn = %u hash_head != hash()\n", mpn);
                exit(-1);
            }
            if (entry->hash_head[0] == NULL) {
                printf("\nFTL MAP CACHE: mpn = %u hash_head is NULL!\n", mpn);
                exit(-1);
            }
        }

        // Count how many times the requested MPN appears in the cache.
        if (entry->mpn == mpn)
            ++count;
    }

    if (count > 1) {
        printf("\nFTL MAP CACHE: mpn = %u is cached %d times\n", mpn, count);
        exit(-1);
    }
}
#endif // MC_DEBUG

// Global Function Definitions

//    ftlmcRAM: Return RAM used by the FTL cache
//
//       Input: cache = cache handle or NULL
//
//     Returns: RAM usage in bytes
//
ui32 ftlmcRAM(const FTLMC* cache) {
    return cache
               ? sizeof(FTLMC) +
                     cache->num_mpgs * (sizeof(ftlmcEntry) + cache->mpg_sz + sizeof(ftlmcEntry*))
               : 0;
}

//    ftlmcNew: Create a new instance of an FTL map pages cache
//
//      Inputs: ftl = handle to FTL using this cache
//              num_mpgs = number of map pages the cache holds
//              wf = function used to write a dirty map page (eviction/flush)
//              rf = function used to read a map page on a cache miss
//              mpg_sz = map page size in bytes
//
//     Returns: Handle to new cache if successful, NULL on error
//
FTLMC* ftlmcNew(void* ftl, ui32 num_mpgs, ftlmcFuncW wf, ftlmcFuncR rf, ui32 mpg_sz) {
    FTLMC* cache;
    ui32* data_space;

    // Allocate space to hold the cache structure. Return NULL if unable.
    cache = FsCalloc(1, sizeof(FTLMC));
    if (cache == NULL)
        return NULL;

    // Set the cache FTL handle and read and write functions.
    cache->ftl = ftl;
    cache->read_TFS = rf;
    cache->write_TFS = wf;

    // Set the number of pages and their size.
    cache->num_mpgs = num_mpgs;
    cache->mpg_sz = mpg_sz;

    // Allocate memory for entries and hash table. Return NULL if unable.
    cache->entry = FsCalloc(num_mpgs, sizeof(ftlmcEntry));
    if (cache->entry == NULL) {
        FsFree(cache);
        return NULL;
    }
    cache->hash_tbl = FsCalloc(num_mpgs, sizeof(ftlmcEntry*));
    if (cache->hash_tbl == NULL) {
        FsFree(cache->entry);
        FsFree(cache);
        return NULL;
    }

    // Allocate memory to hold MPN contents. Return NULL if unable.
    data_space = FsAalloc(num_mpgs * mpg_sz);
    if (data_space == NULL) {
        FsFree(cache->hash_tbl);
        FsFree(cache->entry);
        FsFree(cache);
        return NULL;
    }

    // Initialize the cache and return cache handle.
    cache->entry[0].data = data_space;
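    // ftlmcInit() parcels the single data_space block out among all entries.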
    ftlmcInit(cache);
    return cache;
}

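
// Illustrative usage (hypothetical caller, not part of this module):
// 'my_write_mpn' and 'my_read_mpn' are assumed callbacks of type
// ftlmcFuncW/ftlmcFuncR; a cache of 8 map pages of 4 KiB each could be
// created and later released as follows:
//
//     FTLMC* cache = ftlmcNew(ftl, 8, my_write_mpn, my_read_mpn, 4096);
//     if (cache == NULL)
//         return -1;
//     ...
//     ftlmcDelete(&cache);  // frees all cache memory, sets handle to NULL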
//   ftlmcInit: Initialize the cache
//
//       Input: cache = handle to cache
//
void ftlmcInit(FTLMC* cache) {
    ui32 i, vpns_per_mpg = cache->mpg_sz / sizeof(ui32);
    ui32* memp = cache->entry[0].data;

    // Initialize cache's least recently used list.
    CIRC_LIST_INIT(&cache->lru_list);

    // Loop to initialize each cache entry.
    for (i = 0; i < cache->num_mpgs; ++i, memp += vpns_per_mpg) {
        // Assign pointer to cache entry's page data buffer.
        cache->entry[i].data = memp;

        // Initialize entry as unused and clean.
        cache->entry[i].mpn = (ui32)-1;
        cache->entry[i].status = FTLMC_CLEAN;

        // Append entry to cache's least recently used list.
        CIRC_LIST_APPEND(&cache->entry[i].lru_link, &cache->lru_list);

        // Initialize the entry hash table pointers.
        cache->entry[i].hash_head = NULL;
        cache->hash_tbl[i] = NULL;
    }

    // There are no dirty entries at this point.
    cache->num_dirty = 0;
} //lint !e429

//  ftlmcDelete: Delete an FTL cache for map pages
//
//       Input: cache_ptr = pointer to cache handle
//
void ftlmcDelete(FTLMC** cache_ptr) {
    FTLMC* cache = *cache_ptr;

    // Deallocate all the memory associated with the cache.
    FsAfreeClear(&cache->entry[0].data);
    FsFree(cache->entry);
    FsFree(cache->hash_tbl);
    FsFree(cache);

    // Reset pointer to cache handle.
    *cache_ptr = NULL;
}

// ftlmcGetPage: Retrieve contents of map page via cache
//
//      Inputs: cache = cache handle
//              mpn = map page number
//              new_ptr = new map flag address if read, else NULL
//      Output: *new_ptr = TRUE if new mapping for read
//
//     Returns: Pointer to map page data on success, NULL on error
//
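//        Note: on a miss, the least recently used clean entry is reused;
//              if every entry is dirty, the overall least recently used
//              entry is written back first and then reclaimed.
//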
void* ftlmcGetPage(FTLMC* cache, ui32 mpn, int* new_ptr) {
    ftlmcEntry* entry;
    CircLink* link;
    uint hash_ndx;

#if MC_DEBUG
    check_cache(cache, mpn);
#endif

    // Find page's hash table entry.
    entry = cache->hash_tbl[hash(mpn, cache->num_mpgs)];

    // Search hash entry for matching page number.
    for (; entry; entry = entry->next_hash) {
        // Check if page numbers match.
        if (entry->mpn == mpn) {
            // Move entry to end of LRU list.
            CIRC_NODE_REMOVE(&entry->lru_link);
            CIRC_LIST_APPEND(&entry->lru_link, &cache->lru_list);

            // If reading, flag that this page is mapped (because cached).
            if (new_ptr)
                *new_ptr = FALSE;

            // Else writing. If entry was clean, mark it dirty.
            else if (entry->status == FTLMC_CLEAN) {
                ++cache->num_dirty;
                PfAssert(cache->num_dirty <= cache->num_mpgs);
                entry->status = FTLMC_DIRTY;
            }

            // Return pointer to page data.
            return entry->data;
        }
    }

    // Not cached. Search LRU list for least recently used clean entry.
    // If none found, use least recently used entry (head of LRU list).
    for (link = CIRC_LIST_HEAD(&cache->lru_list);; link = link->next_bck) {
        // Check if at end of least recently used list.
        if (CIRC_LIST_AT_END(link, &cache->lru_list)) {
            // Re-use cache's least recently used entry (its LRU head).
            link = CIRC_LIST_HEAD(&cache->lru_list);

            // Convert 'lru_link' pointer to entry pointer.
            entry = (void*)((ui8*)link - offsetof(ftlmcEntry, lru_link));

            // Check if page is dirty.
            if (entry->status == FTLMC_DIRTY) {
                // Write old page contents. Return NULL if error.
                if (cache->write_TFS(cache->ftl, entry->mpn, entry->data))
                    return NULL;

                // Mark page clean and decrement dirty count.
                entry->status = FTLMC_CLEAN;
                PfAssert(cache->num_dirty);
                --cache->num_dirty;
            }

            // Break to use this entry.
            break;
        }

        // Convert 'lru_link' pointer to entry pointer.
        entry = (ftlmcEntry*)((ui8*)link - offsetof(ftlmcEntry, lru_link));

        // Break to use this entry if it is clean.
        if (entry->status == FTLMC_CLEAN)
            break;
    }

    // Move entry to end of LRU list.
    CIRC_NODE_REMOVE(&entry->lru_link);
    CIRC_LIST_APPEND(&entry->lru_link, &cache->lru_list);

    // If the entry is in the hash table, remove it from there.
    if (entry->hash_head) {
        // If entry is not first, update previous one, else update head.
        if (entry->prev_hash)
            entry->prev_hash->next_hash = entry->next_hash;
        else
            *(entry->hash_head) = entry->next_hash;

        // If there is a next entry (entry is not end-of-list), update it.
        if (entry->next_hash)
            entry->next_hash->prev_hash = entry->prev_hash;
    }

    // Read new page into the cache. Return NULL if error.
    if (cache->read_TFS(cache->ftl, mpn, entry->data, new_ptr))
        return NULL;

    // Set cache entry with new page info.
    entry->mpn = mpn;

    // Determine location in hash table for page.
    hash_ndx = hash(mpn, cache->num_mpgs);

    // Add new entry into the hash table.
    entry->prev_hash = NULL;
    if (cache->hash_tbl[hash_ndx]) {
        entry->next_hash = cache->hash_tbl[hash_ndx];
        cache->hash_tbl[hash_ndx]->prev_hash = entry;
    } else
        entry->next_hash = NULL;
    cache->hash_tbl[hash_ndx] = entry;
    entry->hash_head = &cache->hash_tbl[hash_ndx];

    // If map page write, mark entry as dirty and adjust dirty count.
    if (new_ptr == NULL) {
        entry->status = FTLMC_DIRTY;
        ++cache->num_dirty;
        PfAssert(cache->num_dirty <= cache->num_mpgs);
    }

    // Return pointer to page data.
    return entry->data;
}
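
// Illustrative access pattern (hypothetical caller): pass a flag address for
// a read access, or NULL for a write access, which marks the page dirty; a
// later ftlmcFlushMaps() writes all dirty pages back:
//
//     int is_new;
//     ui32* rd = ftlmcGetPage(cache, mpn, &is_new);  // read: *is_new reports new mapping
//     ui32* wr = ftlmcGetPage(cache, mpn, NULL);     // write: entry marked dirty
//     if (rd == NULL || wr == NULL || ftlmcFlushMaps(cache))
//         return -1;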

// ftlmcFlushPage: Search cache for dirty page to flush
//
//      Inputs: cache = cache handle
//              mpn = MPN to flush
//
//     Returns: 0 on success, -1 on failure
//
int ftlmcFlushPage(FTLMC* cache, ui32 mpn) {
    ftlmcEntry* entry = &cache->entry[0];

    // Loop over cache entries, looking for page to flush.
    for (; entry < &cache->entry[cache->num_mpgs]; ++entry) {
        // Continue if not a page number match.
        if (entry->mpn != mpn)
            continue;

        // Break if page is clean.
        if (entry->status == FTLMC_CLEAN)
            break;

        // Mark clean, adjust dirty count, save page, and return status.
        entry->status = FTLMC_CLEAN;
        PfAssert(cache->num_dirty);
        --cache->num_dirty;
        return cache->write_TFS(cache->ftl, entry->mpn, entry->data);
    }

    // Return success.
    return 0;
}

// ftlmcFlushMaps: Flush all dirty map pages
//
//       Input: cache = cache handle
//
//     Returns: 0 on success, -1 on failure
//
int ftlmcFlushMaps(FTLMC* cache) {
    ftlmcEntry* entry = &cache->entry[0];

    // Loop over cache entries, flushing each dirty one.
    for (; entry < &cache->entry[cache->num_mpgs]; ++entry) {
        // Break if all map pages are clean.
        if (cache->num_dirty == 0)
            break;

        // If this page is dirty, save it and mark it clean.
        if (entry->status == FTLMC_DIRTY) {
            entry->status = FTLMC_CLEAN;
            PfAssert(cache->num_dirty);
            --cache->num_dirty;
            if (cache->write_TFS(cache->ftl, entry->mpn, entry->data))
                return -1;
        }
    }

    // No more dirty entries at this point. Return success.
    PfAssert(cache->num_dirty == 0);
    return 0;
}

// ftlmcInCache: Check if map page to be written is in the cache
//
//      Inputs: cache = cache handle
//              mpn = map page number
//
//     Returns: Pointer to page data, NULL if page not in cache
//
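//        Note: a dirty hit is returned marked clean and the dirty count is
//              decremented, since the caller is about to write the page.
//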
ui32* ftlmcInCache(FTLMC* cache, ui32 mpn) {
    ftlmcEntry* entry;

#if MC_DEBUG
    check_cache(cache, mpn);
#endif

    // Find page's hash table entry.
    entry = cache->hash_tbl[hash(mpn, cache->num_mpgs)];

    // Search hash entry for matching page number.
    for (; entry; entry = entry->next_hash) {
        // Check if page numbers match.
        if (entry->mpn == mpn) {
            // If not clean, mark page clean and decrement dirty count.
            if (entry->status != FTLMC_CLEAN) {
                entry->status = FTLMC_CLEAN;
                PfAssert(cache->num_dirty);
                --cache->num_dirty;
            }

            // Return pointer to cached page's data.
            return entry->data;
        }
    }

    // Page is not in cache, return NULL.
    return NULL;
}