/*
 * Copyright (C) 2020-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <stdbool.h>
#include <sys/user.h>

#include "dm.h"
#include "vmmapi.h"
#include "acpi.h"
#include "inout.h"
#include "mem.h"
#include "log.h"
#include "mmio_dev.h"

#define MAX_MMIO_DEV_NUM 2

static struct mmio_dev mmio_devs[MAX_MMIO_DEV_NUM];
static uint32_t mmio_dev_idx = 0U;

struct mmio_dev_ops {
	char *name;
	int (*init)(struct vmctx *, struct acrn_mmiodev *);
	void (*deinit)(struct vmctx *, struct acrn_mmiodev *);
};

SET_DECLARE(mmio_dev_ops_set, struct mmio_dev_ops);
#define DEFINE_MMIO_DEV(x) DATA_SET(mmio_dev_ops_set, x)
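/*
 * Backend registration: each struct mmio_dev_ops instance declared with
 * DEFINE_MMIO_DEV() is collected into the mmio_dev_ops_set linker set and is
 * later looked up by name in mmio_dev_finddev().
 */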

SET_DECLARE(acpi_dev_pt_ops_set, struct acpi_dev_pt_ops);

struct mmio_dev_ops pt_mmiodev;

static uint32_t mmio_dev_base = MMIO_DEV_BASE;
static inline char *ltrim(char *s)
{
	while (*s && *s == ' ')
		s++;
	return s;
}

struct mmio_dev *get_mmiodev(char *name)
{
	int i;
	struct mmio_dev *dev;

	for (i = 0; i < mmio_dev_idx; i++) {
		dev = &mmio_devs[i];
		if (!strncmp(dev->name, name, 16)) {
			return dev;
		}
	}

	return NULL;
}

struct mmio_dev *alloc_mmiodev(void)
{
	return (mmio_dev_idx >= MAX_MMIO_DEV_NUM) ? NULL : &mmio_devs[mmio_dev_idx++];
}

int mmio_dev_alloc_gpa_resource32(uint32_t *addr, uint32_t size_in)
{
	uint32_t base, size;

	size = roundup2(size_in, PAGE_SIZE);
	base = roundup2(mmio_dev_base, size);
	if (base + size <= MMIO_DEV_LIMIT) {
		*addr = base;
		mmio_dev_base = base + size;
		return 0;
	} else {
		return -1;
	}
}
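
/*
 * Illustrative example for the allocator above (hypothetical values, with
 * PAGE_SIZE assumed to be 0x1000): a request of size_in = 0x1800 is rounded
 * up to size = 0x2000, mmio_dev_base is then rounded up to the next
 * 0x2000-aligned base, and on success *addr receives that base while
 * mmio_dev_base advances to base + 0x2000.
 */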

int create_pt_acpidev(char *opt)
{
	struct mmio_dev *dev;
	struct acpi_dev_pt_ops **adptops, *ops;

	SET_FOREACH(adptops, acpi_dev_pt_ops_set) {
		ops = *adptops;
		if (ops->match && ops->match(opt)) {
			dev = alloc_mmiodev();
			if (!dev) {
				pr_err("Failed to create MMIO device %s due to exceeding max MMIO device number\n", opt);
				return -EINVAL;
			}

			return ops->init ? ops->init(opt, dev) : -1;
		}
	}

	pr_err("Unrecognized or unsupported ACPI device HID: %s\n", opt);
	return -EINVAL;
}

/**
 * Parse /proc/iomem to see if there is an entry whose description contains name.
 *
 * Returns false if no matching entry is found. Returns true if one is found,
 * in which case res_start and res_size are filled with the start address and
 * the size (end - start + 1) of the first matching entry.
 *
 * @pre (name != NULL) && (strlen(name) > 0)
 */
bool get_mmio_hpa_resource(char *name, uint64_t *res_start, uint64_t *res_size)
{
	FILE *fp;
	uint64_t start, end;
	bool found = false;
	char line[128];
	char *cp;

	fp = fopen("/proc/iomem", "r");
	if (!fp) {
		pr_err("Error opening /proc/iomem\n");
		return false;
	}

	while (fgets(line, sizeof(line), fp)) {
		if (strstr(line, name)) {
			if ((!dm_strtoul(ltrim(line), &cp, 16, &start) && *cp == '-') &&
			    (!dm_strtoul(cp + 1, &cp, 16, &end))) {
				if ((start == 0) && (end == 0)) {
					pr_err("Please run acrn-dm with superuser privilege\n");
					break;
				}
			} else {
				pr_err("Parsing /proc/iomem failed\n");
				break;
			}

			*res_start = start;
			/* /proc/iomem lists inclusive ranges (start-end), so size is end - start + 1 */
			*res_size = end - start + 1;
			found = true;
			break;
		}
	}

	fclose(fp);
	return found;
}
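
/*
 * Illustrative /proc/iomem entry (hypothetical addresses):
 *   fed40000-fed40fff : MSFT0101:00
 * With name = "MSFT0101:00", get_mmio_hpa_resource() above would return
 * *res_start = 0xfed40000 and *res_size = 0x1000.
 */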

/**
 * Search /sys/bus/acpi/devices for the given HID and fill its modalias into ops.
 * (TODO: more functionality may be added here once passthrough of other ACPI
 * devices is supported.)
 *
 * According to https://www.kernel.org/doc/Documentation/acpi/namespace.txt,
 * the Linux ACPI subsystem converts ACPI namespace objects into a Linux
 * device tree under /sys/devices/LNXSYSTEM:00 and updates it upon receiving
 * ACPI hotplug notification events. For each device object in this hierarchy
 * there is a corresponding symbolic link under /sys/bus/acpi/devices.
 */
int get_more_acpi_dev_info(char *hid, uint32_t instance, struct acpi_dev_pt_ops *ops)
{
	char pathbuf[128], line[32];
	int ret = -1;
	size_t ch_read;
	FILE *fp;

	snprintf(pathbuf, sizeof(pathbuf), "/sys/bus/acpi/devices/%s:%02x/modalias", hid, instance);
	fp = fopen(pathbuf, "r");
	if (!fp)
		return ret;

	ch_read = fread(line, 1, sizeof(line), fp);
	if (!ch_read)
		goto out;

	memcpy(ops->hid, hid, 8);
	memcpy(ops->modalias, line, ch_read);
	ret = 0;

out:
	fclose(fp);
	return ret;
}
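
/*
 * Illustrative example (hypothetical device): for hid = "MSFT0101" and
 * instance = 0, the path built above is
 *   /sys/bus/acpi/devices/MSFT0101:00/modalias
 * and its contents (typically of the form "acpi:<HID>:<CID>:...") are copied
 * into ops->modalias.
 */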

void acpi_dev_write_dsdt(struct vmctx *ctx)
{
	struct acpi_dev_pt_ops **adptops, *ops;

	SET_FOREACH(adptops, acpi_dev_pt_ops_set) {
		ops = *adptops;
		if (ops->write_dsdt)
			ops->write_dsdt(ctx);
	}
}

int create_pt_mmiodev(char *opt)
{
	int err = 0;
	uint64_t base_hpa, size;
	struct mmio_dev *dev;
	char *cp;

	dev = alloc_mmiodev();
	if (!dev) {
		pr_err("MMIO dev number exceeds MAX_MMIO_DEV_NUM!\n");
		return -EINVAL;
	}

	if ((!dm_strtoul(opt, &cp, 16, &base_hpa) && *cp == ',') &&
	    (!dm_strtoul(cp + 1, &cp, 16, &size))) {
		pr_dbg("%s pt mmiodev base: 0x%lx, size: 0x%lx\n", __func__, base_hpa, size);
		strncpy(dev->name, pt_mmiodev.name, 8);
		dev->dev.res[0].host_pa = base_hpa;
		dev->dev.res[0].size = size;
	} else {
		pr_err("%s: invalid option '%s', expected <base_hpa>,<size> in hex\n", __func__, opt);
		err = -EINVAL;
	}

	return err;
}
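
/*
 * Illustrative option string (hypothetical values): opt = "0xfed40000,0x1000"
 * requests passthrough of the 4 KiB MMIO region at host physical address
 * 0xfed40000.
 */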

static struct mmio_dev_ops *mmio_dev_finddev(char *name)
{
	struct mmio_dev_ops **mdpp, *mdp;

	SET_FOREACH(mdpp, mmio_dev_ops_set) {
		mdp = *mdpp;
		if (!strcmp(mdp->name, name))
			return mdp;
	}

	return NULL;
}

int init_mmio_dev(struct vmctx *ctx, struct mmio_dev_ops *ops, struct acrn_mmiodev *mmiodev)
{
	int ret;
	uint32_t base;

	if (mmiodev->res[0].user_vm_pa == 0UL) {
		/* FIXME: mmio_dev_alloc_gpa_resource32 needs one more parameter to indicate
		 * whether the caller needs a specific GPA instead of dynamic allocation.
		 */
		ret = mmio_dev_alloc_gpa_resource32(&base, mmiodev->res[0].size);
		if (ret < 0)
			return ret;
		mmiodev->res[0].user_vm_pa = base;
	}

	return ops->init(ctx, mmiodev);
}

void deinit_mmio_dev(struct vmctx *ctx, struct mmio_dev_ops *ops, struct acrn_mmiodev *mmiodev)
{
	ops->deinit(ctx, mmiodev);
}

int init_mmio_devs(struct vmctx *ctx)
{
	int i, err = 0;
	struct mmio_dev_ops *ops;

	for (i = 0; i < MAX_MMIO_DEV_NUM; i++) {
		ops = mmio_dev_finddev(mmio_devs[i].name);
		if (ops != NULL) {
			err = init_mmio_dev(ctx, ops, &mmio_devs[i].dev);
			pr_notice("mmiodev[%d] hpa:0x%lx gpa:0x%lx size:0x%lx err:%d\n", i,
				  mmio_devs[i].dev.res[0].host_pa, mmio_devs[i].dev.res[0].user_vm_pa,
				  mmio_devs[i].dev.res[0].size, err);
		}

		if (err != 0)
			goto init_mmio_devs_fail;
	}

	return 0;

init_mmio_devs_fail:
	for (; i >= 0; i--) {
		ops = mmio_dev_finddev(mmio_devs[i].name);
		if (ops != NULL)
			deinit_mmio_dev(ctx, ops, &mmio_devs[i].dev);
	}

	return err;
}

void deinit_mmio_devs(struct vmctx *ctx)
{
	int i;
	struct mmio_dev_ops *ops;

	for (i = 0; i < MAX_MMIO_DEV_NUM; i++) {
		ops = mmio_dev_finddev(mmio_devs[i].name);
		if (ops != NULL)
			deinit_mmio_dev(ctx, ops, &mmio_devs[i].dev);
	}
}

static int init_pt_mmiodev(struct vmctx *ctx, struct acrn_mmiodev *dev)
{
	return vm_assign_mmiodev(ctx, dev);
}

static void deinit_pt_mmiodev(struct vmctx *ctx, struct acrn_mmiodev *dev)
{
	vm_deassign_mmiodev(ctx, dev);
}
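
/* MSFT0101 is the ACPI HID used for TPM 2.0 devices; the tpm2 backend below
 * passes a physical TPM2 MMIO region through to the guest.
 */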

struct mmio_dev_ops tpm2 = {
	.name = "MSFT0101",
	/* TODO: the GPA MMIO resource may be allocated from a reserved MMIO region
	 * rather than being hard-coded here.
	 */
	.init = init_pt_mmiodev,
	.deinit = deinit_pt_mmiodev,
};
DEFINE_MMIO_DEV(tpm2);

struct mmio_dev_ops pt_mmiodev = {
	.name = "MMIODEV",
	/* TODO: the GPA MMIO resource may be allocated from a reserved MMIO region
	 * rather than being hard-coded here.
	 */
	.init = init_pt_mmiodev,
	.deinit = deinit_pt_mmiodev,
};
DEFINE_MMIO_DEV(pt_mmiodev);