// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 */
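
/*
 * Userspace usage sketch (illustrative only; vm_fd and the group path
 * are assumptions, not defined in this file).  The device is created
 * with KVM_CREATE_DEVICE and VFIO group fds are attached with
 * KVM_SET_DEVICE_ATTR:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	int32_t group_fd = open("/dev/vfio/26", O_RDWR);  // example group
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (__u64)(uintptr_t)&group_fd,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */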

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

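/*
 * Per-group state: one entry on the device's group_list for each VFIO
 * group file added via KVM_DEV_VFIO_GROUP_ADD.  The struct file
 * reference taken at add time is held until the group is deleted or
 * the device is released.
 */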
struct kvm_vfio_group {
	struct list_head node;
	struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct iommu_group *iommu_group;
#endif
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

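/*
 * These wrappers resolve the vfio symbols with symbol_get()/symbol_put()
 * instead of calling them directly, giving KVM only a soft dependency on
 * the vfio module: if vfio isn't loaded, symbol_get() fails and each
 * wrapper falls back to a safe default.
 */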
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static bool kvm_vfio_file_is_group(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_is_group);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_is_group);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

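/*
 * Detach the group's tables from KVM's SPAPR TCE bookkeeping and drop
 * the iommu_group reference cached in kvg->iommu_group.
 */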
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_group *kvg)
{
	if (WARN_ON_ONCE(!kvg->iommu_group))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
	iommu_group_put(kvg->iommu_group);
	kvg->iommu_group = NULL;
}
#endif

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole
 * device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

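/*
 * Add a VFIO group fd to the device.  The file reference taken by fget()
 * here is held for the life of the kvm_vfio_group entry, and the group
 * is linked to this KVM via kvm_vfio_file_set_kvm().
 */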
static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio group FD. */
	if (!kvm_vfio_file_is_group(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
	if (!kvg) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	kvg->file = filp;
	list_add_tail(&kvg->node, &kv->group_list);

	kvm_arch_start_assignment(dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
	kvm_vfio_update_coherency(dev);

	return 0;
err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}

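/*
 * Remove a previously added group fd.  fdget() only serves to match the
 * caller's fd against the stored file; the long-lived reference taken in
 * kvm_vfio_group_add() is what the fput(kvg->file) below releases.
 */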
static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		list_del(&kvg->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		kfree(kvg);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
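/*
 * Attach a SPAPR TCE table fd to a previously added group, resolving and
 * caching the group's iommu_group on first use.
 */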
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		if (!kvg->iommu_group) {
			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
			if (WARN_ON_ONCE(!kvg->iommu_group)) {
				ret = -EIO;
				goto err_fdput;
			}
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       kvg->iommu_group);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

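/*
 * Dispatch KVM_DEV_VFIO_GROUP attributes.  For ADD/DEL, arg points to a
 * group fd; for SET_SPAPR_TCE it points to a struct kvm_vfio_spapr_tce.
 */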
static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
			      void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_add(dev, fd);

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_group_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr,
					  u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

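/*
 * Device release: called when the device's last reference goes away
 * (typically at VM teardown).  Undo for each remaining group everything
 * that kvm_vfio_group_add() and SET_SPAPR_TCE set up.
 */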
static void kvm_vfio_release(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .release */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.release = kvm_vfio_release,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

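/*
 * Register/unregister kvm_vfio_ops so that userspace can instantiate
 * this device via KVM_CREATE_DEVICE with type KVM_DEV_TYPE_VFIO.
 */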
int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}