/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * (a single address), so it matches the range that contains that address.
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "mem.h"
#include "tree.h"

#define MEMNAMESZ (80)

struct mmio_rb_range {
        RB_ENTRY(mmio_rb_range) mr_link;        /* RB tree links */
        struct mem_range        mr_param;
        uint64_t                mr_base;
        uint64_t                mr_end;
};

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
RB_PROTOTYPE_STATIC(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

/*
 * Per-VM cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range *mmio_hint __aligned(sizeof(struct mmio_rb_range *));

static pthread_rwlock_t mmio_rwlock;

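/*
 * Two ranges compare equal whenever they overlap. RB_FIND() with a
 * degenerate key (mr_base == mr_end == addr) therefore returns the range
 * that contains addr, and RB_INSERT() returns any already-registered
 * range that the new one would overlap.
 */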
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
        if (a->mr_end < b->mr_base)
                return -1;
        else if (a->mr_base > b->mr_end)
                return 1;
        return 0;
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
               struct mmio_rb_range **entry)
{
        struct mmio_rb_range find, *res;

        find.mr_base = find.mr_end = addr;

        res = RB_FIND(mmio_rb_tree, rbt, &find);

        if (res != NULL) {
                *entry = res;
                return 0;
        }

        return -1;
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
        struct mmio_rb_range *overlap;

        overlap = RB_INSERT(mmio_rb_tree, rbt, new);

        if (overlap != NULL) {
#ifdef RB_DEBUG
                pr_dbg("overlap detected: new %lx:%lx, tree %lx:%lx\n",
                       new->mr_base, new->mr_end,
                       overlap->mr_base, overlap->mr_end);
#endif

                return -1;
        }

        return 0;
}

#ifdef RB_DEBUG
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
        struct mmio_rb_range *np;

        pthread_rwlock_rdlock(&mmio_rwlock);
        RB_FOREACH(np, mmio_rb_tree, rbt) {
                pr_dbg("   %lx:%lx, %s\n", np->mr_base, np->mr_end,
                       np->mr_param.name);
        }
        pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE_STATIC(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

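/*
 * mem_read()/mem_write() are thin trampolines: they unpack the registered
 * struct mem_range passed in 'arg' and forward the access to its handler
 * callback with MEM_F_READ or MEM_F_WRITE.
 */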
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
        int error;
        struct mem_range *mr = arg;

        error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
                               rval, mr->arg1, mr->arg2);
        return error;
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
        int error;
        struct mem_range *mr = arg;

        error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
                               &wval, mr->arg1, mr->arg2);
        return error;
}

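/*
 * Resolve the guest-physical address of an MMIO request in three steps:
 * the per-VM hint cache, then the primary tree, then the fallback tree.
 * Returns -ESRCH if no fallback range matches, -EINVAL if no range was
 * resolved at all, otherwise the result of the range's handler.
 */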
int
emulate_mem(struct vmctx *ctx, struct acrn_mmio_request *mmio_req)
{
        uint64_t paddr = mmio_req->address;
        int size = mmio_req->size;
        struct mmio_rb_range *hint, *entry = NULL;
        int err;

        pthread_rwlock_rdlock(&mmio_rwlock);

        /*
         * First check the per-VM cache
         */
        hint = mmio_hint;

        if (hint && paddr >= hint->mr_base && paddr <= hint->mr_end)
                entry = hint;
        else if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
                /* Update the per-VM cache */
                mmio_hint = entry;
        else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
                pthread_rwlock_unlock(&mmio_rwlock);
                return -ESRCH;
        }

        pthread_rwlock_unlock(&mmio_rwlock);

        if (entry == NULL)
                return -EINVAL;

        if (mmio_req->direction == ACRN_IOREQ_DIR_READ)
                err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
                               size, &entry->mr_param);
        else
                err = mem_write(ctx, 0, paddr, mmio_req->value,
                                size, &entry->mr_param);

        return err;
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
        struct mmio_rb_range *entry, *mrp;
        int err;

        err = -1;

        mrp = malloc(sizeof(struct mmio_rb_range));

        if (mrp != NULL) {
                mrp->mr_param = *memp;
                mrp->mr_base = memp->base;
                mrp->mr_end = memp->base + memp->size - 1;
                pthread_rwlock_wrlock(&mmio_rwlock);
                if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
                        err = mmio_rb_add(rbt, mrp);
                pthread_rwlock_unlock(&mmio_rwlock);
                if (err)
                        free(mrp);
        }

        return err;
}

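/*
 * register_mem() adds a range to the primary tree; register_mem_fallback()
 * adds it to the fallback tree, which emulate_mem() consults only when no
 * primary range matches.
 */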
int
register_mem(struct mem_range *memp)
{
        return register_mem_int(&mmio_rb_root, memp);
}

int
register_mem_fallback(struct mem_range *memp)
{
        return register_mem_int(&mmio_rb_fallback, memp);
}
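
/*
 * Illustrative sketch (kept out of the build with #if 0): one way a device
 * model might hook a 4KB MMIO window using the APIs above. The handler
 * name, the base address and the handler prototype are assumptions; the
 * prototype is inferred from how mem_read()/mem_write() invoke
 * mr->handler, so check mem.h for the authoritative typedef.
 */
#if 0
static int
example_mmio_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
                     int size, uint64_t *val, void *arg1, long arg2)
{
        if (dir == MEM_F_READ)
                *val = 0;       /* return the emulated register value */
        /* else: 'dir' is MEM_F_WRITE and *val holds the value written */
        return 0;
}

static void
example_register(void)
{
        /*
         * register_mem_int() copies the structure into the tree node,
         * so a stack-local mem_range is sufficient here.
         */
        struct mem_range mr = {
                .name = "example-mmio",
                .base = 0xd0000000UL,           /* hypothetical GPA */
                .size = 0x1000,
                .flags = MEM_F_READ | MEM_F_WRITE,
                .handler = example_mmio_handler,
                .arg1 = NULL,
                .arg2 = 0,
        };

        if (register_mem(&mr) != 0)
                fprintf(stderr, "example-mmio: range overlaps or out of memory\n");
}
#endif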

/*
 * Remove a previously registered range. The request must match the
 * registered name, base and size exactly, and ranges marked
 * MEM_F_IMMUTABLE cannot be unregistered.
 */
static int
unregister_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
        struct mem_range *mr;
        struct mmio_rb_range *entry = NULL;
        int err;

        pthread_rwlock_wrlock(&mmio_rwlock);
        err = mmio_rb_lookup(rbt, memp->base, &entry);
        if (err == 0) {
                mr = &entry->mr_param;
                if (strncmp(mr->name, memp->name, MEMNAMESZ)
                    || (mr->base != memp->base) || (mr->size != memp->size)
                    || ((mr->flags & MEM_F_IMMUTABLE) != 0)) {
                        err = -1;
                } else {
                        RB_REMOVE(mmio_rb_tree, rbt, entry);

                        /* flush the per-VM cache */
                        if (mmio_hint == entry)
                                mmio_hint = NULL;

                        free(entry);
                }
        }
        pthread_rwlock_unlock(&mmio_rwlock);

        return err;
}

int
unregister_mem(struct mem_range *memp)
{
        return unregister_mem_int(&mmio_rb_root, memp);
}

int
unregister_mem_fallback(struct mem_range *memp)
{
        return unregister_mem_int(&mmio_rb_fallback, memp);
}

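/*
 * init_mem() must be called before any register/unregister or
 * emulate_mem() call: it initializes both trees and the rwlock that
 * guards them.
 */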
void
init_mem(void)
{
        RB_INIT(&mmio_rb_root);
        RB_INIT(&mmio_rb_fallback);
        pthread_rwlock_init(&mmio_rwlock, NULL);
}