/*
 * Copyright (c) 2007 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/list.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <lib/bcache.h>
#include <lib/bio.h>

#define LOCAL_TRACE 0

struct bcache_block {
    struct list_node node;
    bnum_t blocknum;
    int ref_count;
    bool is_dirty;
    void *ptr;
};

struct bcache_stats {
    uint32_t hits;
    uint32_t depth;
    uint32_t misses;
    uint32_t reads;
    uint32_t writes;
};

struct bcache {
    bdev_t *dev;
    size_t block_size;
    int count;
    struct bcache_stats stats;

    struct list_node free_list;
    struct list_node lru_list;

    struct bcache_block *blocks;
};

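/*
 * Create a block cache in front of the block device 'dev', holding
 * 'block_count' cache blocks of 'block_size' bytes each. Blocks start out
 * on the free list and migrate to the LRU list as they are filled; dirty
 * blocks are written back when they are evicted or when bcache_flush()
 * is called. Returns an opaque bcache_t handle.
 */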
bcache_t bcache_create(bdev_t *dev, size_t block_size, int block_count) {
    struct bcache *cache;

    cache = malloc(sizeof(struct bcache));

    cache->dev = dev;
    cache->block_size = block_size;
    cache->count = block_count;
    memset(&cache->stats, 0, sizeof(cache->stats));

    list_initialize(&cache->free_list);
    list_initialize(&cache->lru_list);

    cache->blocks = malloc(sizeof(struct bcache_block) * block_count);
    int i;
    for (i=0; i < block_count; i++) {
        cache->blocks[i].ref_count = 0;
        cache->blocks[i].is_dirty = false;
        cache->blocks[i].ptr = malloc(block_size);
        // add to the free list
        list_add_head(&cache->free_list, &cache->blocks[i].node);
    }

    return (bcache_t)cache;
}

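/*
 * Write a dirty block back to the underlying block device and clear its
 * dirty flag. Returns 0 on success or the negative error from bio_write().
 */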
static int flush_block(struct bcache *cache, struct bcache_block *block) {
    int rc;

    rc = bio_write(cache->dev, block->ptr,
                   (off_t)block->blocknum * cache->block_size,
                   cache->block_size);
    if (rc < 0)
        goto exit;

    block->is_dirty = false;
    cache->stats.writes++;
    rc = 0;
exit:
    return (rc);
}

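/*
 * Tear down a cache created with bcache_create(). All blocks must have been
 * released (ref_count of zero); dirty blocks are not written back, only
 * warned about.
 */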
void bcache_destroy(bcache_t _cache) {
    struct bcache *cache = _cache;
    int i;

    for (i=0; i < cache->count; i++) {
        DEBUG_ASSERT(cache->blocks[i].ref_count == 0);

        if (cache->blocks[i].is_dirty)
            printf("warning: freeing dirty block %u\n",
                   cache->blocks[i].blocknum);

        free(cache->blocks[i].ptr);
    }

    // free the block array itself along with the cache structure
    free(cache->blocks);
    free(cache);
}

/* find a block if it's already present */
static struct bcache_block *find_block(struct bcache *cache, uint blocknum) {
    uint32_t depth = 0;
    struct bcache_block *block;

    LTRACEF("num %u\n", blocknum);

    block = NULL;
    list_for_every_entry(&cache->lru_list, block, struct bcache_block, node) {
        LTRACEF("looking at entry %p, num %u\n", block, block->blocknum);
        depth++;

        if (block->blocknum == blocknum) {
            list_delete(&block->node);
            list_add_tail(&cache->lru_list, &block->node);
            cache->stats.hits++;
            cache->stats.depth += depth;
            return block;
        }
    }

    cache->stats.misses++;
    return NULL;
}

/* allocate a new block */
static struct bcache_block *alloc_block(struct bcache *cache) {
    int err;
    struct bcache_block *block;

    /* pop one off the free list if it's present */
    block = list_remove_head_type(&cache->free_list, struct bcache_block, node);
    if (block) {
        block->ref_count = 0;
        list_add_tail(&cache->lru_list, &block->node);
        LTRACEF("found block %p on free list\n", block);
        return block;
    }

    /* walk the lru, looking for a free block */
    list_for_every_entry(&cache->lru_list, block, struct bcache_block, node) {
        LTRACEF("looking at %p, num %u\n", block, block->blocknum);
        if (block->ref_count == 0) {
            if (block->is_dirty) {
                err = flush_block(cache, block);
                if (err)
                    return NULL;
            }

            // add it to the tail of the lru
            list_delete(&block->node);
            list_add_tail(&cache->lru_list, &block->node);
            return block;
        }
    }

    return NULL;
}

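/*
 * Look up a block in the cache, reading it in from the device on a miss.
 * On a miss a block is taken from the free list (or an unreferenced block
 * is evicted from the LRU, flushing it first if dirty) and filled with a
 * bio_read(). Returns NULL if no block could be allocated or the read failed.
 */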
static struct bcache_block *find_or_fill_block(struct bcache *cache, uint blocknum) {
    int err;

    LTRACEF("block %u\n", blocknum);

    /* see if it's already in the cache */
    struct bcache_block *block = find_block(cache, blocknum);
    if (block == NULL) {
        LTRACEF("wasn't allocated\n");

        /* allocate a new block and fill it */
        block = alloc_block(cache);
        DEBUG_ASSERT(block);

        LTRACEF("wasn't allocated, new block %p\n", block);

        block->blocknum = blocknum;
        err = bio_read(cache->dev, block->ptr, (off_t)blocknum * cache->block_size, cache->block_size);
        if (err < 0) {
            /* pull the block back off the lru, return it to the free list, and report an error */
            list_delete(&block->node);
            list_add_tail(&cache->free_list, &block->node);
            return NULL;
        }

        cache->stats.reads++;
    }

    DEBUG_ASSERT(block->blocknum == blocknum);

    return block;
}

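/*
 * Read one block through the cache, copying its contents into 'buf'.
 * 'buf' must be at least block_size bytes. Returns 0 on success, -1 on error.
 */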
int bcache_read_block(bcache_t _cache, void *buf, uint blocknum) {
    struct bcache *cache = _cache;

    LTRACEF("buf %p, blocknum %u\n", buf, blocknum);

    struct bcache_block *block = find_or_fill_block(cache, blocknum);
    if (block == NULL) {
        /* error */
        return -1;
    }

    memcpy(buf, block->ptr, cache->block_size);
    return 0;
}

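/*
 * Get a pointer to a block's cached data without copying it. The block's
 * reference count is incremented so it cannot be evicted; the caller must
 * balance this with bcache_put_block(). Returns 0 on success, -1 on error.
 */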
int bcache_get_block(bcache_t _cache, void **ptr, uint blocknum) {
    struct bcache *cache = _cache;

    LTRACEF("ptr %p, blocknum %u\n", ptr, blocknum);

    DEBUG_ASSERT(ptr);

    struct bcache_block *block = find_or_fill_block(cache, blocknum);
    if (block == NULL) {
        /* error */
        return -1;
    }

    /* increment the ref count to keep it from being freed */
    block->ref_count++;
    *ptr = block->ptr;

    return 0;
}

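/*
 * Release a reference taken with bcache_get_block(). The block must still
 * be in the cache and must have an outstanding reference.
 */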
int bcache_put_block(bcache_t _cache, uint blocknum) {
    struct bcache *cache = _cache;

    LTRACEF("blocknum %u\n", blocknum);

    struct bcache_block *block = find_block(cache, blocknum);

    /* be pretty hard on the caller for now */
    DEBUG_ASSERT(block);
    DEBUG_ASSERT(block->ref_count > 0);

    block->ref_count--;

    return 0;
}

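/*
 * Mark a cached block as dirty so it will be written back by bcache_flush()
 * or when it is evicted. The block must already be in the cache; returns -1
 * if it is not.
 */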
int bcache_mark_block_dirty(bcache_t priv, uint blocknum) {
    int err;
    struct bcache *cache = priv;
    struct bcache_block *block;

    block = find_block(cache, blocknum);
    if (!block) {
        err = -1;
        goto exit;
    }

    block->is_dirty = true;
    err = 0;
exit:
    return (err);
}

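/*
 * Fill a block with zeroes and mark it dirty, without reading it from the
 * device first. If the block is not already cached, a fresh block is
 * allocated for it. Returns 0 on success, -1 if no block could be allocated.
 */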
int bcache_zero_block(bcache_t priv, uint blocknum) {
    int err;
    struct bcache *cache = priv;
    struct bcache_block *block;

    block = find_block(cache, blocknum);
    if (!block) {
        block = alloc_block(cache);
        if (!block) {
            err = -1;
            goto exit;
        }

        block->blocknum = blocknum;
    }

    memset(block->ptr, 0, cache->block_size);
    block->is_dirty = true;
    err = 0;
exit:
    return (err);
}

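/*
 * Write all dirty blocks back to the underlying device. Returns 0 on success
 * or the error from the first failed write.
 */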
int bcache_flush(bcache_t priv) {
    int err;
    struct bcache *cache = priv;
    struct bcache_block *block;

    list_for_every_entry(&cache->lru_list, block, struct bcache_block, node) {
        if (block->is_dirty) {
            err = flush_block(cache, block);
            if (err)
                goto exit;
        }
    }

    err = 0;
exit:
    return (err);
}

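/*
 * Print cache statistics (hit/miss counts and percentages, average search
 * depth on hits, device reads and writes), prefixed with 'name'.
 */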
void bcache_dump(bcache_t priv, const char *name) {
    uint32_t finds;
    struct bcache *cache = priv;

    finds = cache->stats.hits + cache->stats.misses;

    printf("%s: hits=%u(%u%%) depth=%u misses=%u(%u%%) reads=%u writes=%u\n",
           name,
           cache->stats.hits,
           finds ? (cache->stats.hits * 100) / finds : 0,
           cache->stats.hits ? cache->stats.depth / cache->stats.hits : 0,
           cache->stats.misses,
           finds ? (cache->stats.misses * 100) / finds : 0,
           cache->stats.reads,
           cache->stats.writes);
}