/*
 * Copyright (c) 2014-2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <dev/virtio.h>
#include <dev/virtio/virtio_ring.h>

#include <lk/debug.h>
#include <assert.h>
#include <lk/trace.h>
#include <lk/compiler.h>
#include <lk/list.h>
#include <lk/err.h>
#include <stdlib.h>
#include <string.h>
#include <lk/pow2.h>
#include <lk/init.h>
#include <kernel/thread.h>
#include <platform/interrupts.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif

#include "virtio_priv.h"

#if WITH_DEV_VIRTIO_BLOCK
#include <dev/virtio/block.h>
#endif
#if WITH_DEV_VIRTIO_NET
#include <dev/virtio/net.h>
#endif
#if WITH_DEV_VIRTIO_GPU
#include <dev/virtio/gpu.h>
#endif
#if WITH_DEV_VIRTIO_9P
#include <dev/virtio/9p.h>
#endif

#define LOCAL_TRACE 0

static struct virtio_device *devices;

static void dump_mmio_config(const volatile struct virtio_mmio_config *mmio) {
    printf("mmio at %p\n", mmio);
    printf("\tmagic 0x%x\n", mmio->magic);
    printf("\tversion 0x%x\n", mmio->version);
    printf("\tdevice_id 0x%x\n", mmio->device_id);
    printf("\tvendor_id 0x%x\n", mmio->vendor_id);
    printf("\thost_features 0x%x\n", mmio->host_features);
    printf("\tguest_features 0x%x\n", mmio->guest_features);
    printf("\tguest_features_sel 0x%x\n", mmio->guest_features_sel);
    printf("\tguest_page_size %u\n", mmio->guest_page_size);
    printf("\tqnum %u\n", mmio->queue_num);
    printf("\tqnum_max %u\n", mmio->queue_num_max);
    printf("\tqnum_align %u\n", mmio->queue_align);
    printf("\tqnum_pfn %u\n", mmio->queue_pfn);
    printf("\tstatus 0x%x\n", mmio->status);
}

void virtio_dump_desc(const struct vring_desc *desc) {
    printf("vring descriptor %p\n", desc);
    printf("\taddr 0x%llx\n", desc->addr);
    printf("\tlen 0x%x\n", desc->len);
    printf("\tflags 0x%hx\n", desc->flags);
    printf("\tnext 0x%hx\n", desc->next);
}

static enum handler_return virtio_mmio_irq(void *arg) {
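    /* Interrupt handler shared by all detected virtio-mmio devices. Bit 0 of
     * interrupt_status signals used-ring updates: walk every active ring from
     * last_used to the device's current used index and hand each completed
     * element to the driver's irq callback. Bit 1 signals a config change,
     * forwarded to the optional config_change_callback. */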
    struct virtio_device *dev = (struct virtio_device *)arg;
    LTRACEF("dev %p, index %u\n", dev, dev->index);

    uint32_t irq_status = dev->mmio_config->interrupt_status;
    LTRACEF("status 0x%x\n", irq_status);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (irq_status & 0x1) { /* used ring update */
        // XXX is this safe?
        dev->mmio_config->interrupt_ack = 0x1;

        /* cycle through all the active rings */
        for (uint r = 0; r < MAX_VIRTIO_RINGS; r++) {
            if ((dev->active_rings_bitmap & (1<<r)) == 0)
                continue;

            struct vring *ring = &dev->ring[r];
            LTRACEF("ring %u: used flags 0x%hx idx 0x%hx last_used %u\n", r, ring->used->flags, ring->used->idx, ring->last_used);

            uint cur_idx = ring->used->idx;
            for (uint i = ring->last_used; i != (cur_idx & ring->num_mask); i = (i + 1) & ring->num_mask) {
                LTRACEF("looking at idx %u\n", i);

                // process chain
                struct vring_used_elem *used_elem = &ring->used->ring[i];
                LTRACEF("id %u, len %u\n", used_elem->id, used_elem->len);

                DEBUG_ASSERT(dev->irq_driver_callback);
                ret |= dev->irq_driver_callback(dev, r, used_elem);

                ring->last_used = (ring->last_used + 1) & ring->num_mask;
            }
        }
    }
    if (irq_status & 0x2) { /* config change */
        dev->mmio_config->interrupt_ack = 0x2;

        if (dev->config_change_callback) {
            ret |= dev->config_change_callback(dev);
        }
    }

    LTRACEF("exiting irq\n");

    return ret;
}

int virtio_mmio_detect(void *ptr, uint count, const uint irqs[], size_t stride) {
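    /* Probe 'count' legacy virtio-mmio register windows starting at 'ptr' and
     * spaced 'stride' bytes apart. For each slot the interrupt is registered
     * (but left masked), the magic value is checked, and the matching
     * device-class init routine is called based on device_id. Returns the
     * number of devices successfully initialized, or a negative error. */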
    LTRACEF("ptr %p, count %u\n", ptr, count);

    DEBUG_ASSERT(ptr);
    DEBUG_ASSERT(irqs);
    DEBUG_ASSERT(!devices);

    /* allocate an array big enough to hold a list of devices */
    devices = calloc(count, sizeof(struct virtio_device));
    if (!devices)
        return ERR_NO_MEMORY;

    int found = 0;
    for (uint i = 0; i < count; i++) {
        volatile struct virtio_mmio_config *mmio = (struct virtio_mmio_config *)((uint8_t *)ptr + i * stride);
        struct virtio_device *dev = &devices[i];

        dev->index = i;
        dev->irq = irqs[i];

        mask_interrupt(irqs[i]);
        register_int_handler(irqs[i], &virtio_mmio_irq, (void *)dev);

        LTRACEF("looking at %p: magic 0x%x version 0x%x did 0x%x vid 0x%x\n",
                mmio, mmio->magic, mmio->version, mmio->device_id, mmio->vendor_id);

        if (mmio->magic != VIRTIO_MMIO_MAGIC) {
            continue;
        }

        // TODO: handle version 2

#if LOCAL_TRACE
        if (mmio->device_id != 0) {
            dump_mmio_config(mmio);
        }
#endif

#if WITH_DEV_VIRTIO_BLOCK
        if (mmio->device_id == 2) { // block device
            LTRACEF("found block device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_block_init(dev, virtio_read_host_feature_word(dev, 0));
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);
            }
        }
#endif // WITH_DEV_VIRTIO_BLOCK
#if WITH_DEV_VIRTIO_NET
        if (mmio->device_id == 1) { // network device
            LTRACEF("found net device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_net_init(dev);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);
            }
        }
#endif // WITH_DEV_VIRTIO_NET
#if WITH_DEV_VIRTIO_9P
        if (mmio->device_id == 9) { // 9p device
            LTRACEF("found 9p device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_9p_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                virtio_9p_start(dev);
            }
        }
#endif // WITH_DEV_VIRTIO_9P
#if WITH_DEV_VIRTIO_GPU
        if (mmio->device_id == 0x10) { // virtio-gpu
            LTRACEF("found gpu device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_gpu_init(dev, virtio_read_host_feature_word(dev, 0));
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                virtio_gpu_start(dev);
            }
        }
#endif // WITH_DEV_VIRTIO_GPU

        if (dev->valid)
            found++;
    }

    return found;
}

void virtio_free_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index) {
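    /* return a descriptor to the ring's free list, which is a singly linked
     * list threaded through the descriptors' own 'next' fields */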
    LTRACEF("dev %p ring %u index %u free_count %u\n", dev, ring_index, desc_index, dev->ring[ring_index].free_count);
    dev->ring[ring_index].desc[desc_index].next = dev->ring[ring_index].free_list;
    dev->ring[ring_index].free_list = desc_index;
    dev->ring[ring_index].free_count++;
}

uint16_t virtio_alloc_desc(struct virtio_device *dev, uint ring_index) {
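    /* pop a single descriptor index off the ring's free list;
     * returns 0xffff if the ring is out of descriptors */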
    if (dev->ring[ring_index].free_count == 0)
        return 0xffff;

    DEBUG_ASSERT(dev->ring[ring_index].free_list != 0xffff);

    uint16_t i = dev->ring[ring_index].free_list;
    struct vring_desc *desc = &dev->ring[ring_index].desc[i];
    dev->ring[ring_index].free_list = desc->next;

    dev->ring[ring_index].free_count--;

    return i;
}

struct vring_desc *virtio_alloc_desc_chain(struct virtio_device *dev, uint ring_index, size_t count, uint16_t *start_index) {
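    /* Pop 'count' descriptors off the free list and link them with
     * VRING_DESC_F_NEXT into a chain. The chain is assembled tail first: the
     * first descriptor popped ends the chain and each later one is prepended,
     * so the descriptor returned (and the index written to *start_index) is
     * the head. Returns NULL if not enough free descriptors remain.
     *
     * Typical driver usage (sketch; details vary per driver):
     *
     *   uint16_t i;
     *   struct vring_desc *desc = virtio_alloc_desc_chain(dev, 0, 2, &i);
     *   // fill in addr/len/flags of each descriptor in the chain
     *   virtio_submit_chain(dev, 0, i);
     *   virtio_kick(dev, 0);
     *
     * Completion is delivered to the device's irq_driver_callback with the
     * chain's head index in the used element. */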
    if (dev->ring[ring_index].free_count < count)
        return NULL;

    /* start popping entries off the chain */
    struct vring_desc *last = 0;
    uint16_t last_index = 0;
    while (count > 0) {
        uint16_t i = dev->ring[ring_index].free_list;
        struct vring_desc *desc = &dev->ring[ring_index].desc[i];

        dev->ring[ring_index].free_list = desc->next;
        dev->ring[ring_index].free_count--;

        if (last) {
            desc->flags = VRING_DESC_F_NEXT;
            desc->next = last_index;
        } else {
            // first one
            desc->flags = 0;
            desc->next = 0;
        }
        last = desc;
        last_index = i;
        count--;
    }

    if (start_index)
        *start_index = last_index;

    return last;
}

void virtio_submit_chain(struct virtio_device *dev, uint ring_index, uint16_t desc_index) {
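    /* publish a descriptor chain: write the head index into the next avail
     * ring slot, then barrier before bumping avail->idx so the device never
     * sees the new index before the slot contents */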
    LTRACEF("dev %p, ring %u, desc %u\n", dev, ring_index, desc_index);

    /* add the chain to the available list */
    struct vring_avail *avail = dev->ring[ring_index].avail;

    avail->ring[avail->idx & dev->ring[ring_index].num_mask] = desc_index;
    mb();
    avail->idx++;

#if LOCAL_TRACE
    hexdump(avail, 16);
#endif
}

void virtio_kick(struct virtio_device *dev, uint ring_index) {
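    /* notify the device that the selected queue has new available buffers by
     * writing the queue index to the queue_notify register */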
    LTRACEF("dev %p, ring %u\n", dev, ring_index);

    dev->mmio_config->queue_notify = ring_index;
    mb();
}

status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len) {
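    /* Allocate and register one virtqueue of 'len' descriptors (len must be a
     * power of two). The vring memory is allocated contiguously (uncached
     * device memory when the kernel VM is enabled), every descriptor is placed
     * on the free list, and the queue is programmed into the device through
     * the legacy mmio registers (guest_page_size, queue_sel/num/align/pfn). */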
    LTRACEF("dev %p, index %u, len %u\n", dev, index, len);

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(len > 0 && ispow2(len));
    DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);

    if (len == 0 || !ispow2(len))
        return ERR_INVALID_ARGS;

    struct vring *ring = &dev->ring[index];

    /* allocate a ring */
    size_t size = vring_size(len, PAGE_SIZE);
    LTRACEF("need %zu bytes\n", size);

#if WITH_KERNEL_VM
    void *vptr;
    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (err < 0)
        return ERR_NO_MEMORY;

    LTRACEF("allocated virtio_ring at va %p\n", vptr);

    /* compute the physical address */
    paddr_t pa;
    pa = vaddr_to_paddr(vptr);
    if (pa == 0) {
        return ERR_NO_MEMORY;
    }

    LTRACEF("virtio_ring at pa 0x%lx\n", pa);
#else
    void *vptr = memalign(PAGE_SIZE, size);
    if (!vptr)
        return ERR_NO_MEMORY;

    LTRACEF("ptr %p\n", vptr);
    memset(vptr, 0, size);

    /* compute the physical address */
    paddr_t pa = (paddr_t)vptr;
#endif

    /* initialize the ring */
    vring_init(ring, len, vptr, PAGE_SIZE);
    dev->ring[index].free_list = 0xffff;
    dev->ring[index].free_count = 0;

    /* add all the descriptors to the free list */
    for (uint i = 0; i < len; i++) {
        virtio_free_desc(dev, index, i);
    }

    /* register the ring with the device */
    DEBUG_ASSERT(dev->mmio_config);
    dev->mmio_config->guest_page_size = PAGE_SIZE;
    dev->mmio_config->queue_sel = index;
    dev->mmio_config->queue_num = len;
    dev->mmio_config->queue_align = PAGE_SIZE;
    dev->mmio_config->queue_pfn = pa / PAGE_SIZE;

    /* mark the ring active */
    dev->active_rings_bitmap |= (1 << index);

    return NO_ERROR;
}

void virtio_reset_device(struct virtio_device *dev) {
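    /* writing 0 to the status register resets the device; status is then
     * rebuilt with the acknowledge/driver/driver_ok helpers below */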
    dev->mmio_config->status = 0;
}

void virtio_status_acknowledge_driver(struct virtio_device *dev) {
    dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
}

void virtio_status_driver_ok(struct virtio_device *dev) {
    dev->mmio_config->status |= VIRTIO_STATUS_DRIVER_OK;
}

void virtio_set_guest_features(struct virtio_device *dev, uint32_t word, uint32_t features) {
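    /* legacy 32-bit feature negotiation: select which feature word to access,
     * then write the guest (driver) feature bits for that word */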
    dev->mmio_config->guest_features_sel = word;
    dev->mmio_config->guest_features = features;
}

uint32_t virtio_read_host_feature_word(struct virtio_device *dev, uint32_t word) {
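    /* select a host (device) feature word and read the corresponding 32 bits */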
    dev->mmio_config->host_features_sel = word;
    return dev->mmio_config->host_features;
}

static void virtio_init(uint level) {
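    /* currently a no-op; registered below as an init hook at threading level */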
}

LK_INIT_HOOK(virtio, &virtio_init, LK_INIT_LEVEL_THREADING);