1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
4 * Author: Alex Williamson <alex.williamson@redhat.com>
5 */
6 #ifndef __VFIO_VFIO_H__
7 #define __VFIO_VFIO_H__
8
9 #include <linux/file.h>
10 #include <linux/device.h>
11 #include <linux/cdev.h>
12 #include <linux/module.h>
13 #include <linux/vfio.h>
14
15 struct iommufd_ctx;
16 struct iommu_group;
17 struct vfio_container;
18
19 void vfio_device_put_registration(struct vfio_device *device);
20 bool vfio_device_try_get_registration(struct vfio_device *device);
21 int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd);
22 void vfio_device_close(struct vfio_device *device,
23 struct iommufd_ctx *iommufd);
24
25 extern const struct file_operations vfio_device_fops;
26
/*
 * Kind of IOMMU backing behind a vfio_group, fixed at group creation.
 */
enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
50
/*
 * One iommu_group as exposed to userspace. The embedded struct device /
 * struct cdev pair back the group's character device node.
 */
struct vfio_group {
	struct device dev;
	struct cdev cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once users
	 * reaches 0 then the iommu_group is invalid.
	 */
	refcount_t drivers;
	unsigned int container_users;	/* opens currently using the container */
	struct iommu_group *iommu_group;
	struct vfio_container *container;	/* legacy container backend, if any */
	struct list_head device_list;	/* vfio_devices belonging to this group */
	struct mutex device_lock;	/* NOTE(review): presumably guards device_list — confirm at use sites */
	struct list_head vfio_next;	/* linkage for the core's group list */
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head container_next;	/* linkage on the container's group list */
#endif
	enum vfio_group_type type;
	struct mutex group_lock;
	struct kvm *kvm;
	struct file *opened_file;
	struct blocking_notifier_head notifier;
	struct iommufd_ctx *iommufd;	/* iommufd backend; mutually exclusive with container */
	spinlock_t kvm_ref_lock;	/* NOTE(review): presumably guards kvm — confirm at use sites */
};
78
79 int vfio_device_set_group(struct vfio_device *device,
80 enum vfio_group_type type);
81 void vfio_device_remove_group(struct vfio_device *device);
82 void vfio_device_group_register(struct vfio_device *device);
83 void vfio_device_group_unregister(struct vfio_device *device);
84 int vfio_device_group_use_iommu(struct vfio_device *device);
85 void vfio_device_group_unuse_iommu(struct vfio_device *device);
86 void vfio_device_group_close(struct vfio_device *device);
87 bool vfio_device_has_container(struct vfio_device *device);
88 int __init vfio_group_init(void);
89 void vfio_group_cleanup(void);
90
vfio_device_is_noiommu(struct vfio_device * vdev)91 static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
92 {
93 return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
94 vdev->group->type == VFIO_NO_IOMMU;
95 }
96
97 #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
98 /**
99 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
100 */
101 struct vfio_iommu_driver_ops {
102 char *name;
103 struct module *owner;
104 void *(*open)(unsigned long arg);
105 void (*release)(void *iommu_data);
106 long (*ioctl)(void *iommu_data, unsigned int cmd,
107 unsigned long arg);
108 int (*attach_group)(void *iommu_data,
109 struct iommu_group *group,
110 enum vfio_group_type);
111 void (*detach_group)(void *iommu_data,
112 struct iommu_group *group);
113 int (*pin_pages)(void *iommu_data,
114 struct iommu_group *group,
115 dma_addr_t user_iova,
116 int npage, int prot,
117 struct page **pages);
118 void (*unpin_pages)(void *iommu_data,
119 dma_addr_t user_iova, int npage);
120 void (*register_device)(void *iommu_data,
121 struct vfio_device *vdev);
122 void (*unregister_device)(void *iommu_data,
123 struct vfio_device *vdev);
124 int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
125 void *data, size_t count, bool write);
126 struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
127 struct iommu_group *group);
128 };
129
/*
 * A registered IOMMU backend driver: its ops plus linkage on the core's
 * list of available drivers.
 */
struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops *ops;
	struct list_head vfio_next;
};
134
135 int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
136 void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
137
138 struct vfio_container *vfio_container_from_file(struct file *filep);
139 int vfio_group_use_container(struct vfio_group *group);
140 void vfio_group_unuse_container(struct vfio_group *group);
141 int vfio_container_attach_group(struct vfio_container *container,
142 struct vfio_group *group);
143 void vfio_group_detach_container(struct vfio_group *group);
144 void vfio_device_container_register(struct vfio_device *device);
145 void vfio_device_container_unregister(struct vfio_device *device);
146 int vfio_device_container_pin_pages(struct vfio_device *device,
147 dma_addr_t iova, int npage,
148 int prot, struct page **pages);
149 void vfio_device_container_unpin_pages(struct vfio_device *device,
150 dma_addr_t iova, int npage);
151 int vfio_device_container_dma_rw(struct vfio_device *device,
152 dma_addr_t iova, void *data,
153 size_t len, bool write);
154
155 int __init vfio_container_init(void);
156 void vfio_container_cleanup(void);
157 #else
/* CONFIG_VFIO_CONTAINER=n stub: no file can be a container. */
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}
163
/* CONFIG_VFIO_CONTAINER=n stub: the legacy container path is unsupported. */
static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}
168
/* CONFIG_VFIO_CONTAINER=n stub: nothing to release. */
static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}
172
/* CONFIG_VFIO_CONTAINER=n stub: groups cannot attach to a container. */
static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}
178
/* CONFIG_VFIO_CONTAINER=n stub: nothing to detach from. */
static inline void vfio_group_detach_container(struct vfio_group *group)
{
}
182
/* CONFIG_VFIO_CONTAINER=n stub: no container to register the device with. */
static inline void vfio_device_container_register(struct vfio_device *device)
{
}
186
/* CONFIG_VFIO_CONTAINER=n stub: no container to unregister from. */
static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}
190
/* CONFIG_VFIO_CONTAINER=n stub: page pinning via a container is unsupported. */
static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}
197
/* CONFIG_VFIO_CONTAINER=n stub: no pinned pages can exist. */
static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}
202
/* CONFIG_VFIO_CONTAINER=n stub: DMA read/write via a container is unsupported. */
static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}
209
/* CONFIG_VFIO_CONTAINER=n stub: nothing to initialize, always succeeds. */
static inline int vfio_container_init(void)
{
	return 0;
}
/* CONFIG_VFIO_CONTAINER=n stub: nothing to clean up. */
static inline void vfio_container_cleanup(void)
{
}
217 #endif
218
219 #if IS_ENABLED(CONFIG_IOMMUFD)
220 int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
221 void vfio_iommufd_unbind(struct vfio_device *device);
222 #else
/* CONFIG_IOMMUFD=n stub: binding to an iommufd context is unsupported. */
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
228
/* CONFIG_IOMMUFD=n stub: nothing to unbind. */
static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
232 #endif
233
234 #if IS_ENABLED(CONFIG_VFIO_VIRQFD)
235 int __init vfio_virqfd_init(void);
236 void vfio_virqfd_exit(void);
237 #else
/* CONFIG_VFIO_VIRQFD=n stub: nothing to initialize, always succeeds. */
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
/* CONFIG_VFIO_VIRQFD=n stub: nothing to tear down. */
static inline void vfio_virqfd_exit(void)
{
}
245 #endif
246
247 #ifdef CONFIG_VFIO_NOIOMMU
248 extern bool vfio_noiommu __read_mostly;
249 #else
250 enum { vfio_noiommu = false };
251 #endif
252
253 #ifdef CONFIG_HAVE_KVM
254 void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
255 void vfio_device_put_kvm(struct vfio_device *device);
256 #else
/* CONFIG_HAVE_KVM=n stub: no KVM reference to take. */
static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
					     struct kvm *kvm)
{
}
261
/* CONFIG_HAVE_KVM=n stub: no KVM reference to drop. */
static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
265 #endif
266
267 #endif
268