// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 *
 */
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <asm/global_data.h>
#include <linux/ctype.h>
#include <linux/list.h>

struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};

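/*
 * cache_find() - find a cache node that fully covers the requested range
 *
 * On a hit the node is moved to the head of the list so the list stays
 * in most-recently-used order. Returns NULL when no matching node exists.
 */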
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}

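/*
 * blkcache_read() - look up a block range in the cache
 *
 * Copies the cached data to @buffer and returns 1 on a hit, 0 on a
 * miss; the hit/miss counters are updated either way. A sketch of how
 * a block read path is expected to use it together with blkcache_fill()
 * (the caller-side names such as desc->if_type and ops->read below are
 * assumptions about the caller, not defined in this file):
 *
 *	if (blkcache_read(desc->if_type, desc->devnum, start, blkcnt,
 *			  desc->blksz, buffer))
 *		return blkcnt;
 *	n = ops->read(dev, start, blkcnt, buffer);
 *	if (n == blkcnt)
 *		blkcache_fill(desc->if_type, desc->devnum, start, blkcnt,
 *			      desc->blksz, buffer);
 */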
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

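/*
 * blkcache_fill() - store a freshly read block range in the cache
 *
 * Ranges larger than max_blocks_per_entry are not cached, and nothing
 * is cached while max_entries is zero. When the cache is full the
 * least-recently-used entry is evicted and its node is reused.
 */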
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* keep the evicted node's buffer if it is big enough */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

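/*
 * blkcache_invalidate() - drop all entries for a device
 *
 * Matching entries are unlinked and their nodes and buffers are freed.
 * Passing an iftype of -1 drops every entry regardless of device.
 */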
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		/* lh is the first member, so the list entry is the node */
		node = (struct block_cache_node *)entry;
		if (iftype == -1 ||
		    (node->iftype == iftype && node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

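/*
 * blkcache_configure() - set the cache geometry
 *
 * Any change to the blocks-per-entry or entry-count limit invalidates
 * the whole cache; the hit/miss counters are reset unconditionally.
 */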
void blkcache_configure(unsigned blocks, unsigned entries)
{
	/* invalidate cache if there is a change */
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries))
		blkcache_invalidate(-1, 0);

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

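/*
 * blkcache_stats() - copy the current statistics to @stats
 *
 * The hit/miss counters are reset once they have been reported.
 */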
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}

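/*
 * blkcache_free() - release all cache entries and their buffers
 */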
void blkcache_free(void)
{
	blkcache_invalidate(-1, 0);
}