1 /*
2  * Copyright (C) 2018-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  */
7 
8 #include <sys/cdefs.h>
9 #include <sys/param.h>
10 #include <sys/uio.h>
11 #include <sys/ioctl.h>
12 #include <sys/eventfd.h>
13 #include <errno.h>
14 #include <stdarg.h>
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdint.h>
18 #include <unistd.h>
19 #include <pthread.h>
20 #include <linux/vhost.h>
21 
22 #include "dm.h"
23 #include "pci_core.h"
24 #include "irq.h"
25 #include "vmmapi.h"
26 #include "vhost.h"
27 
/* Module-wide debug switch; zero by default, set non-zero to enable DPRINTF. */
static int vhost_debug;
#define LOG_TAG "vhost: "
/* Debug log: emitted only when vhost_debug is non-zero. */
#define DPRINTF(fmt, args...) \
       do { if (vhost_debug) pr_dbg(LOG_TAG fmt, ##args); } while (0)
/* Error log: always emitted. */
#define WPRINTF(fmt, args...) pr_err(LOG_TAG fmt, ##args)
33 
34 inline
vhost_kernel_ioctl(struct vhost_dev * vdev,unsigned long int request,void * arg)35 int vhost_kernel_ioctl(struct vhost_dev *vdev,
36 		       unsigned long int request,
37 		       void *arg)
38 {
39 	int rc;
40 
41 	rc = ioctl(vdev->fd, request, arg);
42 	if (rc < 0)
43 		WPRINTF("ioctl failed, fd = %d, request = 0x%lx,"
44 			" rc = %d, errno = %d\n",
45 			vdev->fd, request, rc, errno);
46 	return rc;
47 }
48 
49 static void
vhost_kernel_init(struct vhost_dev * vdev,struct virtio_base * base,int fd,int vq_idx,uint32_t busyloop_timeout)50 vhost_kernel_init(struct vhost_dev *vdev, struct virtio_base *base,
51 		  int fd, int vq_idx, uint32_t busyloop_timeout)
52 {
53 	vdev->base = base;
54 	vdev->fd = fd;
55 	vdev->vq_idx = vq_idx;
56 	vdev->busyloop_timeout = busyloop_timeout;
57 }
58 
59 static void
vhost_kernel_deinit(struct vhost_dev * vdev)60 vhost_kernel_deinit(struct vhost_dev *vdev)
61 {
62 	vdev->base = NULL;
63 	vdev->vq_idx = 0;
64 	vdev->busyloop_timeout = 0;
65 	if (vdev->fd > 0) {
66 		close(vdev->fd);
67 		vdev->fd = -1;
68 	}
69 }
70 
/* Hand the guest memory layout to the vhost kernel driver. */
static int
vhost_kernel_set_mem_table(struct vhost_dev *vdev,
			   struct vhost_memory *mem)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_SET_MEM_TABLE, mem);
	return error;
}
77 
/* Program the descriptor/avail/used ring addresses of one vring. */
static int
vhost_kernel_set_vring_addr(struct vhost_dev *vdev,
			    struct vhost_vring_addr *addr)
{
	return vhost_kernel_ioctl(vdev, VHOST_SET_VRING_ADDR, (void *)addr);
}
84 
/* Set the queue size (number of descriptors) of one vring. */
static int
vhost_kernel_set_vring_num(struct vhost_dev *vdev,
			   struct vhost_vring_state *ring)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_SET_VRING_NUM, ring);
	return error;
}
91 
/* Set the next avail index the kernel should start processing from. */
static int
vhost_kernel_set_vring_base(struct vhost_dev *vdev,
			    struct vhost_vring_state *ring)
{
	return vhost_kernel_ioctl(vdev, VHOST_SET_VRING_BASE, (void *)ring);
}
98 
/* Read back the kernel's current avail index for one vring. */
static int
vhost_kernel_get_vring_base(struct vhost_dev *vdev,
			    struct vhost_vring_state *ring)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_GET_VRING_BASE, ring);
	return error;
}
105 
/* Install (or with fd == -1, remove) the kick eventfd of one vring. */
static int
vhost_kernel_set_vring_kick(struct vhost_dev *vdev,
			    struct vhost_vring_file *file)
{
	return vhost_kernel_ioctl(vdev, VHOST_SET_VRING_KICK, (void *)file);
}
112 
/* Install (or with fd == -1, remove) the call eventfd of one vring. */
static int
vhost_kernel_set_vring_call(struct vhost_dev *vdev,
			    struct vhost_vring_file *file)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_SET_VRING_CALL, file);
	return error;
}
119 
/*
 * Configure the busy-polling timeout (in us) for one vring.
 * On kernels whose uapi lacks VHOST_SET_VRING_BUSYLOOP_TIMEOUT this
 * compiles to a no-op that reports success.
 */
static int
vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *vdev,
					struct vhost_vring_state *s)
{
#ifndef VHOST_SET_VRING_BUSYLOOP_TIMEOUT
	return 0;
#else
	return vhost_kernel_ioctl(vdev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
#endif
}
130 
/* Push the negotiated feature bits down to the vhost kernel driver. */
static int
vhost_kernel_set_features(struct vhost_dev *vdev,
			  uint64_t features)
{
	/* the ioctl takes a pointer, so hand it the local copy */
	return vhost_kernel_ioctl(vdev, VHOST_SET_FEATURES, &features);
}
137 
/* Query the feature bits supported by the vhost kernel driver. */
static int
vhost_kernel_get_features(struct vhost_dev *vdev,
			  uint64_t *features)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_GET_FEATURES, features);
	return error;
}
144 
/* Claim this process as the exclusive owner of the vhost device. */
static int
vhost_kernel_set_owner(struct vhost_dev *vdev)
{
	return vhost_kernel_ioctl(vdev, VHOST_SET_OWNER, NULL);
}
150 
/* Reset the vhost device: drops ownership and all vring state. */
static int
vhost_kernel_reset_device(struct vhost_dev *vdev)
{
	int error;

	error = vhost_kernel_ioctl(vdev, VHOST_RESET_OWNER, NULL);
	return error;
}
156 
/*
 * Drain a non-blocking eventfd.
 *
 * Each successful read returns the 8-byte counter and resets it to
 * zero (EFD_SEMAPHORE is not specified when eventfd() is called in
 * vhost_vq_init), so a single read clears all pending events.
 *
 * @param fd eventfd to drain.
 *
 * @return 1 if an event was pending (counter was non-zero), 0 otherwise.
 */
static int
vhost_eventfd_test_and_clear(int fd)
{
	uint64_t count = 0;
	int rc;

	rc = read(fd, &count, sizeof(count));
	/* fix: count is uint64_t, so the old %ld specifier was wrong on
	 * ILP32 targets; cast and print with %llu
	 */
	DPRINTF("read eventfd, rc = %d, errno = %d, count = %llu\n",
		rc, errno, (unsigned long long)count);
	return rc > 0 ? 1 : 0;
}
174 
175 static int
vhost_vq_register_eventfd(struct vhost_dev * vdev,int idx,bool is_register)176 vhost_vq_register_eventfd(struct vhost_dev *vdev,
177 			  int idx, bool is_register)
178 {
179 	struct acrn_ioeventfd ioeventfd = {0};
180 	struct acrn_irqfd irqfd = {0};
181 	struct virtio_base *base;
182 	struct vhost_vq *vq;
183 	struct virtio_vq_info *vqi;
184 	struct msix_table_entry *mte;
185 	struct acrn_msi_entry msi;
186 	int rc = -1;
187 
188 	/* this interface is called only by vhost_vq_start,
189 	 * parameters have been checked there
190 	 */
191 	base = vdev->base;
192 	vqi = &vdev->base->queues[vdev->vq_idx + idx];
193 	vq = &vdev->vqs[idx];
194 
195 	if (!is_register) {
196 		irqfd.flags = ACRN_IRQFD_FLAG_DEASSIGN;
197 	}
198 
199 	virtio_register_ioeventfd(base, idx, is_register, vq->kick_fd);
200 	/* register irqfd for notify */
201 	mte = &vdev->base->dev->msix.table[vqi->msix_idx];
202 	msi.msi_addr = mte->addr;
203 	msi.msi_data = mte->msg_data;
204 	irqfd.fd = vq->call_fd;
205 	/* no additional flag bit should be set */
206 	irqfd.msi = msi;
207 	DPRINTF("[irqfd: %d][MSIX: %d]\n", irqfd.fd, vqi->msix_idx);
208 	rc = vm_irqfd(vdev->base->dev->vmctx, &irqfd);
209 	if (rc < 0) {
210 		WPRINTF("vm_irqfd failed rc = %d, errno = %d\n", rc, errno);
211 		/* unregister ioeventfd */
212 		if (is_register) {
213 			ioeventfd.flags |= ACRN_IOEVENTFD_FLAG_DEASSIGN;
214 			vm_ioeventfd(vdev->base->dev->vmctx, &ioeventfd);
215 		}
216 		return -1;
217 	}
218 
219 	return 0;
220 }
221 
222 static int
vhost_vq_init(struct vhost_dev * vdev,int idx)223 vhost_vq_init(struct vhost_dev *vdev, int idx)
224 {
225 	struct vhost_vq *vq;
226 
227 	if (!vdev || !vdev->vqs)
228 		goto fail;
229 
230 	vq = &vdev->vqs[idx];
231 	if (!vq)
232 		goto fail;
233 
234 	vq->kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
235 	if (vq->kick_fd < 0) {
236 		WPRINTF("create kick_fd failed\n");
237 		goto fail_kick;
238 	}
239 
240 	vq->call_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
241 	if (vq->call_fd < 0) {
242 		WPRINTF("create call_fd failed\n");
243 		goto fail_call;
244 	}
245 
246 	vq->idx = idx;
247 	vq->dev = vdev;
248 	return 0;
249 
250 fail_call:
251 	close(vq->kick_fd);
252 fail_kick:
253 	vq->kick_fd = -1;
254 	vq->call_fd = -1;
255 fail:
256 	return -1;
257 }
258 
259 static int
vhost_vq_deinit(struct vhost_vq * vq)260 vhost_vq_deinit(struct vhost_vq *vq)
261 {
262 	if (!vq)
263 		return -1;
264 
265 	if (vq->call_fd > 0) {
266 		close(vq->call_fd);
267 		vq->call_fd = -1;
268 	}
269 
270 	if (vq->kick_fd > 0) {
271 		close(vq->kick_fd);
272 		vq->kick_fd = -1;
273 	}
274 
275 	return 0;
276 }
277 
/*
 * Set up and enable one vhost virtqueue (vdev->vqs[idx]).
 *
 * The ioctl order below matters to the vhost kernel driver: the vring
 * geometry and ring addresses must be in place before the kick fd is
 * installed, because installing the kick fd is what starts the kernel
 * servicing the queue. Failures roll back whatever was set up.
 *
 * @param vdev Pointer to struct vhost_dev.
 * @param idx  Virtqueue index relative to this vhost_dev (the absolute
 *             virtio queue index is idx + vdev->vq_idx).
 *
 * @return 0 on success and -1 on failure.
 */
static int
vhost_vq_start(struct vhost_dev *vdev, int idx)
{
	struct vhost_vq *vq;
	struct virtio_vq_info *vqi;
	struct vhost_vring_state ring;
	struct vhost_vring_addr addr;
	struct vhost_vring_file file;
	int rc, q_idx;

	/* sanity check */
	if (!vdev->base || !vdev->base->queues || !vdev->base->vops ||
		!vdev->vqs) {
		WPRINTF("vhost_dev is not initialized\n");
		goto fail;
	}

	/*
	 * vq_idx is introduced to support multi-queue feature of vhost net.
	 * When multi-queue feature is enabled, every vhost_dev owns part of
	 * the virtqueues defined by virtio backend driver in device model,
	 * they are specified by
	 * [vdev->vq_idx, vdev->vq_idx + vhost_dev->nvqs)
	 * If multi-queue feature is not required, just leave vdev->vq_idx
	 * to zero.
	 */
	q_idx = idx + vdev->vq_idx;
	if (q_idx >= vdev->base->vops->nvq) {
		WPRINTF("invalid vq index: idx = %d, vq_idx = %d\n",
			idx, vdev->vq_idx);
		goto fail;
	}
	vqi = &vdev->base->queues[q_idx];
	vq = &vdev->vqs[idx];

	/* clear kick_fd and call_fd so no stale events are delivered */
	vhost_eventfd_test_and_clear(vq->kick_fd);
	vhost_eventfd_test_and_clear(vq->call_fd);

	/* register ioeventfd & irqfd */
	rc = vhost_vq_register_eventfd(vdev, idx, true);
	if (rc < 0) {
		WPRINTF("register eventfd failed: idx = %d\n", idx);
		goto fail;
	}

	/* VHOST_SET_VRING_NUM: queue size negotiated by the guest */
	ring.index = idx;
	ring.num = vqi->qsize;
	rc = vhost_kernel_set_vring_num(vdev, &ring);
	if (rc < 0) {
		WPRINTF("set_vring_num failed: idx = %d\n", idx);
		goto fail_vring;
	}

	/* VHOST_SET_VRING_BASE: resume from the saved avail index
	 * (ring.index is still idx from above)
	 */
	ring.num = vqi->last_avail;
	rc = vhost_kernel_set_vring_base(vdev, &ring);
	if (rc < 0) {
		WPRINTF("set_vring_base failed: idx = %d, last_avail = %d\n",
			idx, vqi->last_avail);
		goto fail_vring;
	}

	/* VHOST_SET_VRING_ADDR: user-space addresses of the rings as
	 * mapped in the device model (no dirty logging, so flags = 0
	 * and log_guest_addr is unused)
	 */
	addr.index = idx;
	addr.desc_user_addr = (uintptr_t)vqi->desc;
	addr.avail_user_addr = (uintptr_t)vqi->avail;
	addr.used_user_addr = (uintptr_t)vqi->used;
	addr.log_guest_addr = (uintptr_t)NULL;
	addr.flags = 0;
	rc = vhost_kernel_set_vring_addr(vdev, &addr);
	if (rc < 0) {
		WPRINTF("set_vring_addr failed: idx = %d\n", idx);
		goto fail_vring;
	}

	/* VHOST_SET_VRING_CALL: fd the kernel signals on completion */
	file.index = idx;
	file.fd = vq->call_fd;
	rc = vhost_kernel_set_vring_call(vdev, &file);
	if (rc < 0) {
		WPRINTF("set_vring_call failed\n");
		goto fail_vring;
	}

	/* VHOST_SET_VRING_KICK: installing this fd starts the queue */
	file.index = idx;
	file.fd = vq->kick_fd;
	rc = vhost_kernel_set_vring_kick(vdev, &file);
	if (rc < 0) {
		WPRINTF("set_vring_kick failed: idx = %d", idx);
		goto fail_vring_kick;
	}

	return 0;

fail_vring_kick:
	/* detach the call fd installed above (fd = -1 means remove) */
	file.index = idx;
	file.fd = -1;
	vhost_kernel_set_vring_call(vdev, &file);
fail_vring:
	/* unregister ioeventfd & irqfd */
	vhost_vq_register_eventfd(vdev, idx, false);
fail:
	return -1;
}
384 
/*
 * Quiesce one vhost virtqueue and pull its state back into the
 * device model.
 *
 * The kick/call fds are detached first (fd = -1 tells the kernel to
 * stop using them), then VHOST_GET_VRING_BASE retrieves the last
 * avail index so virtio emulation can resume where the kernel
 * stopped.
 *
 * @param vdev Pointer to struct vhost_dev.
 * @param idx  Virtqueue index relative to this vhost_dev.
 *
 * @return 0 on success and -1 on failure.
 */
static int
vhost_vq_stop(struct vhost_dev *vdev, int idx)
{
	struct virtio_vq_info *vqi;
	struct vhost_vring_file file;
	struct vhost_vring_state ring;
	int rc, q_idx;

	/* sanity check */
	if (!vdev->base || !vdev->base->queues || !vdev->base->vops ||
		!vdev->vqs) {
		WPRINTF("vhost_dev is not initialized\n");
		return -1;
	}

	/* absolute virtio queue index; see comment in vhost_vq_start */
	q_idx = idx + vdev->vq_idx;
	if (q_idx >= vdev->base->vops->nvq) {
		WPRINTF("invalid vq index: idx = %d, vq_idx = %d\n",
			idx, vdev->vq_idx);
		return -1;
	}
	vqi = &vdev->base->queues[q_idx];

	/* fd = -1 requests removal of the eventfd from the vring */
	file.index = idx;
	file.fd = -1;

	/* VHOST_SET_VRING_KICK: stop the kernel polling the queue */
	vhost_kernel_set_vring_kick(vdev, &file);

	/* VHOST_SET_VRING_CALL: stop completion signaling */
	vhost_kernel_set_vring_call(vdev, &file);

	/* VHOST_GET_VRING_BASE: save where the kernel left off */
	ring.index = idx;
	rc = vhost_kernel_get_vring_base(vdev, &ring);
	if (rc < 0)
		WPRINTF("get_vring_base failed: idx = %d", idx);
	else
		vqi->last_avail = ring.num;

	/* update vqi->save_used: snapshot the used index from the
	 * shared ring so the DM knows what the guest has consumed
	 */
	vqi->save_used = vqi->used->idx;

	/* unregister ioeventfd & irqfd */
	rc = vhost_vq_register_eventfd(vdev, idx, false);
	if (rc < 0)
		WPRINTF("unregister eventfd failed: idx = %d\n", idx);

	return rc;
}
435 
/*
 * Build a vhost_memory table describing guest RAM — the lowmem range
 * starting at GPA 0 and, if present, the highmem range starting at
 * highmem_gpa_base — and hand it to the vhost kernel driver so it can
 * translate guest physical addresses to DM user-space addresses.
 *
 * @param vdev Pointer to struct vhost_dev.
 *
 * @return 0 on success and -1 on failure.
 */
static int
vhost_set_mem_table(struct vhost_dev *vdev)
{
	struct vmctx *ctx;
	struct vhost_memory *mem;
	uint32_t nregions = 0;
	int rc;

	/* first pass: count regions to size the allocation */
	ctx = vdev->base->dev->vmctx;
	if (ctx->lowmem > 0)
		nregions++;
	if (ctx->highmem > 0)
		nregions++;

	/* vhost_memory ends in a flexible array of regions */
	mem = calloc(1, sizeof(struct vhost_memory) +
		sizeof(struct vhost_memory_region) * nregions);
	if (!mem) {
		WPRINTF("out of memory\n");
		return -1;
	}

	/* second pass: fill in each region's GPA -> HVA mapping */
	nregions = 0;
	if (ctx->lowmem > 0) {
		mem->regions[nregions].guest_phys_addr = (uintptr_t)0;
		mem->regions[nregions].memory_size = ctx->lowmem;
		mem->regions[nregions].userspace_addr =
			(uintptr_t)ctx->baseaddr;
		DPRINTF("[%d][0x%llx -> 0x%llx, 0x%llx]\n",
			nregions,
			mem->regions[nregions].guest_phys_addr,
			mem->regions[nregions].userspace_addr,
			mem->regions[nregions].memory_size);
		nregions++;
	}

	if (ctx->highmem > 0) {
		mem->regions[nregions].guest_phys_addr = ctx->highmem_gpa_base;
		mem->regions[nregions].memory_size = ctx->highmem;
		mem->regions[nregions].userspace_addr =
			(uintptr_t)(ctx->baseaddr + ctx->highmem_gpa_base);
		DPRINTF("[%d][0x%llx -> 0x%llx, 0x%llx]\n",
			nregions,
			mem->regions[nregions].guest_phys_addr,
			mem->regions[nregions].userspace_addr,
			mem->regions[nregions].memory_size);
		nregions++;
	}

	mem->nregions = nregions;
	mem->padding = 0;
	/* the table is copied by the kernel, so free it right after */
	rc = vhost_kernel_set_mem_table(vdev, mem);
	free(mem);
	if (rc < 0) {
		WPRINTF("set_mem_table failed\n");
		return -1;
	}

	return 0;
}
495 
/**
 * @brief vhost_dev initialization.
 *
 * This interface is called to initialize the vhost_dev. It must be called
 * before the actual feature negotiation with the guest OS starts.
 *
 * @param vdev Pointer to struct vhost_dev.
 * @param base Pointer to struct virtio_base.
 * @param fd fd of the vhost chardev.
 * @param vq_idx The first virtqueue which would be used by this vhost dev.
 * @param vhost_features Subset of vhost features which would be enabled.
 * @param vhost_ext_features Specific vhost internal features to be enabled.
 * @param busyloop_timeout Busy loop timeout in us.
 *
 * @return 0 on success and -1 on failure.
 */
int
vhost_dev_init(struct vhost_dev *vdev,
	       struct virtio_base *base,
	       int fd,
	       int vq_idx,
	       uint64_t vhost_features,
	       uint64_t vhost_ext_features,
	       uint32_t busyloop_timeout)
{
	uint64_t features;
	int i, rc;

	/* sanity check */
	if (!base || !base->queues || !base->vops) {
		WPRINTF("virtio_base is not initialized\n");
		goto fail;
	}

	if (!vdev->vqs || vdev->nvqs == 0) {
		WPRINTF("virtqueue is not initialized\n");
		goto fail;
	}

	/* this vhost_dev's queue window must fit in the virtio device */
	if (vq_idx + vdev->nvqs > base->vops->nvq) {
		WPRINTF("invalid vq_idx: %d\n", vq_idx);
		goto fail;
	}

	/* cache base/fd/vq_idx/busyloop settings in vdev */
	vhost_kernel_init(vdev, base, fd, vq_idx, busyloop_timeout);

	rc = vhost_kernel_get_features(vdev, &features);
	if (rc < 0) {
		WPRINTF("vhost_get_features failed\n");
		goto fail;
	}

	/* create kick/call eventfds for every owned virtqueue */
	for (i = 0; i < vdev->nvqs; i++) {
		rc = vhost_vq_init(vdev, i);
		if (rc < 0)
			goto fail;
	}

	/* specific backend features to vhost */
	vdev->vhost_ext_features = vhost_ext_features & features;

	/* features supported by vhost */
	vdev->vhost_features = vhost_features & features;

	/*
	 * If the features bits are not supported by either vhost kernel
	 * mediator or configuration of device model(specified by
	 * vhost_features), they should be disabled in device_caps,
	 * which expose as virtio host_features for virtio FE driver.
	 */
	vdev->base->device_caps &= ~(vhost_features ^ features);
	vdev->started = false;

	return 0;

fail:
	/* NOTE(review): on early failure vhost_dev_deinit dereferences
	 * vdev->base — this assumes the caller zero-initialized *vdev;
	 * confirm against call sites
	 */
	vhost_dev_deinit(vdev);
	return -1;
}
575 
576 /**
577  * @brief vhost_dev cleanup.
578  *
579  * This interface is called to cleanup the vhost_dev.
580  *
581  * @param vdev Pointer to struct vhost_dev.
582  *
583  * @return 0 on success and -1 on failure.
584  */
585 int
vhost_dev_deinit(struct vhost_dev * vdev)586 vhost_dev_deinit(struct vhost_dev *vdev)
587 {
588 	int i;
589 
590 	if (!vdev->base || !vdev->base->queues || !vdev->base->vops)
591 		return -1;
592 
593 	for (i = 0; i < vdev->nvqs; i++)
594 		vhost_vq_deinit(&vdev->vqs[i]);
595 
596 	vhost_kernel_deinit(vdev);
597 
598 	return 0;
599 }
600 
/**
 * @brief start vhost data plane.
 *
 * This interface is called to start the data plane in vhost.
 *
 * @param vdev Pointer to struct vhost_dev.
 *
 * @return 0 on success and -1 on failure.
 */
int
vhost_dev_start(struct vhost_dev *vdev)
{
	struct vhost_vring_state state;
	uint64_t features;
	int i, rc;

	/* already running: nothing to do */
	if (vdev->started)
		return 0;

	/* sanity check */
	if (!vdev->base || !vdev->base->queues || !vdev->base->vops) {
		WPRINTF("virtio_base is not initialized\n");
		goto fail;
	}

	/* the guest driver must have completed feature negotiation */
	if ((vdev->base->status & VIRTIO_CONFIG_S_DRIVER_OK) == 0) {
		WPRINTF("status error 0x%x\n", vdev->base->status);
		goto fail;
	}

	/* only msix is supported now (irqfd targets an MSI-X vector) */
	if (!pci_msix_enabled(vdev->base->dev)) {
		WPRINTF("only msix is supported\n");
		goto fail;
	}

	/* VHOST_SET_OWNER must precede all other vhost ioctls */
	rc = vhost_kernel_set_owner(vdev);
	if (rc < 0) {
		WPRINTF("vhost_set_owner failed\n");
		goto fail;
	}

	/* set vhost internal features: what the guest negotiated,
	 * limited to what vhost supports, plus backend-only bits
	 */
	features = (vdev->base->negotiated_caps & vdev->vhost_features) |
		vdev->vhost_ext_features;
	rc = vhost_kernel_set_features(vdev, features);
	if (rc < 0) {
		WPRINTF("set_features failed\n");
		goto fail;
	}
	DPRINTF("set_features: 0x%lx\n", features);

	/* set memory table so the kernel can translate guest addresses */
	rc = vhost_set_mem_table(vdev);
	if (rc < 0) {
		WPRINTF("set_mem_table failed\n");
		goto fail;
	}

	/* config busyloop timeout (0 means busy polling disabled) */
	if (vdev->busyloop_timeout) {
		state.num = vdev->busyloop_timeout;
		for (i = 0; i < vdev->nvqs; i++) {
			state.index = i;
			rc = vhost_kernel_set_vring_busyloop_timeout(vdev,
				&state);
			if (rc < 0) {
				WPRINTF("set_busyloop_timeout failed\n");
				goto fail;
			}
		}
	}

	/* start vhost virtqueue */
	for (i = 0; i < vdev->nvqs; i++) {
		rc = vhost_vq_start(vdev, i);
		if (rc < 0)
			goto fail_vq;
	}

	vdev->started = true;
	return 0;

fail_vq:
	/* roll back only the queues that actually started */
	while (--i >= 0)
		vhost_vq_stop(vdev, i);
fail:
	return -1;
}
690 
691 /**
692  * @brief stop vhost data plane.
693  *
694  * This interface is called to stop the data plane in vhost.
695  *
696  * @param vdev Pointer to struct vhost_dev.
697  *
698  * @return 0 on success and -1 on failure.
699  */
700 int
vhost_dev_stop(struct vhost_dev * vdev)701 vhost_dev_stop(struct vhost_dev *vdev)
702 {
703 	int i, rc = 0;
704 
705 	for (i = 0; i < vdev->nvqs; i++)
706 		vhost_vq_stop(vdev, i);
707 
708 	/* the following are done by this ioctl:
709 	 * 1) resources of the vhost dev are freed
710 	 * 2) vhost virtqueues are reset
711 	 */
712 	rc = vhost_kernel_reset_device(vdev);
713 	if (rc < 0) {
714 		WPRINTF("vhost_reset_device failed\n");
715 		rc = -1;
716 	}
717 
718 	vdev->started = false;
719 	return rc;
720 }
721