/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <string.h>
#include <openamp/virtqueue.h>
#include <metal/atomic.h>
#include <metal/log.h>
#include <metal/alloc.h>

/* Prototypes for internal functions. */
static void vq_ring_init(struct virtqueue *, void *, int);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
				   uint16_t, struct virtqueue_buf *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static int virtqueue_nused(struct virtqueue *vq);

/* Default implementation of P2V based on libmetal */
static inline void *virtqueue_phys_to_virt(struct virtqueue *vq,
					   metal_phys_addr_t phys)
{
	struct metal_io_region *io = vq->shm_io;

	return metal_io_phys_to_virt(io, phys);
}

/* Default implementation of V2P based on libmetal */
static inline metal_phys_addr_t virtqueue_virt_to_phys(struct virtqueue *vq,
						       void *buf)
{
	struct metal_io_region *io = vq->shm_io;

	return metal_io_virt_to_phys(io, buf);
}

/**
 * virtqueue_create - Creates a new VirtIO queue
 *
 * @param virt_dev  - Pointer to VirtIO device
 * @param id        - VirtIO queue ID, must be unique
 * @param name      - Name of VirtIO queue
 * @param ring      - Pointer to vring_alloc_info control block
 * @param callback  - Pointer to callback function, invoked
 *                    when a message is available on VirtIO queue
 * @param notify    - Pointer to notify function, used to notify
 *                    the other side that a job is available for it
 * @param vq        - Created VirtIO queue
 *
 * @return          - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     const char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue *vq)
{
	int status = VQUEUE_SUCCESS;

	VQ_PARAM_CHK(ring == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);
	VQ_PARAM_CHK(vq == NULL, status, ERROR_NO_MEM);

	if (status == VQUEUE_SUCCESS) {
		vq->vq_dev = virt_dev;
		vq->vq_name = name;
		vq->vq_queue_index = id;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq, (void *)ring->vaddr, ring->align);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);
	}

	return (status);
}
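
/*
 * Usage sketch (illustrative only, not part of this file's API): a typical
 * driver-side setup fills a vring_alloc_info describing the shared-memory
 * ring (base address, alignment, and a power-of-two descriptor count, as
 * checked above) and hands it to virtqueue_create(). RING_VADDR,
 * RING_ALIGN, vdev, rx_callback and rx_notify are application-provided
 * placeholders.
 *
 *	struct vring_alloc_info ring_info = {
 *		.vaddr = RING_VADDR,
 *		.align = RING_ALIGN,
 *		.num_descs = 256,
 *	};
 *
 *	status = virtqueue_create(vdev, 0, "rx_vq", &ring_info,
 *				  rx_callback, rx_notify, vq);
 *	if (status == VQUEUE_SUCCESS)
 *		virtqueue_enable_cb(vq);
 */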

/**
 * virtqueue_add_buffer()   - Enqueues a new buffer in the vring for
 *                            consumption by the other side. Readable
 *                            buffers are always inserted before writable
 *                            buffers
 *
 * @param vq                - Pointer to VirtIO queue control block.
 * @param buf_list          - Pointer to a list of virtqueue buffers.
 * @param readable          - Number of readable buffers
 * @param writable          - Number of writable buffers
 * @param cookie            - Pointer to hold callback data
 *
 * @return                  - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
			 int readable, int writable, void *cookie)
{
	struct vq_desc_extra *dxp = NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	needed = readable + writable;

	VQ_PARAM_CHK(vq == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {
		VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, dxp->cookie == NULL,
			 "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx,
					 buf_list, readable, writable);

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		/*
		 * Update vring_avail control block fields so that the other
		 * side can get the buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return status;
}
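
/*
 * Usage sketch (hypothetical caller, not part of this file): enqueue one
 * buffer readable by the other side and one writable by it, then kick.
 * tx_data, rx_data, their lengths and cookie are application-provided.
 *
 *	struct virtqueue_buf bufs[2] = {
 *		{ .buf = tx_data, .len = tx_len },
 *		{ .buf = rx_data, .len = rx_len },
 *	};
 *
 *	if (virtqueue_add_buffer(vq, bufs, 1, 1, cookie) == VQUEUE_SUCCESS)
 *		virtqueue_kick(vq);
 */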

/**
 * virtqueue_get_buffer - Returns used buffers from VirtIO queue
 *
 * @param vq            - Pointer to VirtIO queue control block
 * @param len           - Length of consumed buffer
 * @param idx           - Index of the buffer
 *
 * @return              - Pointer to used buffer
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (!vq || vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	VQUEUE_BUSY(vq);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	atomic_thread_fence(memory_order_seq_cst);

	desc_idx = (uint16_t)uep->id;
	if (len)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	vq->vq_descx[desc_idx].cookie = NULL;

	if (idx)
		*idx = used_idx;
	VQUEUE_IDLE(vq);

	return cookie;
}
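
/*
 * Usage sketch: a receive callback typically drains the used ring by
 * calling virtqueue_get_buffer() until it returns NULL. The processing
 * function is an application-provided placeholder.
 *
 *	uint32_t len;
 *	void *cookie;
 *
 *	while ((cookie = virtqueue_get_buffer(vq, &len, NULL)) != NULL)
 *		process_used_buffer(cookie, len);
 */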

uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
	return vq->vq_ring.desc[idx].len;
}

/**
 * virtqueue_free   - Frees VirtIO queue resources
 *
 * @param vq        - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free(struct virtqueue *vq)
{
	if (vq) {
		if (vq->vq_free_cnt != vq->vq_nentries) {
			metal_log(METAL_LOG_WARNING,
				  "%s: freeing non-empty virtqueue\r\n",
				  vq->vq_name);
		}

		metal_free_memory(vq);
	}
}

/**
 * virtqueue_get_available_buffer   - Returns buffer available for use in the
 *                                    VirtIO queue
 *
 * @param vq                        - Pointer to VirtIO queue control block
 * @param avail_idx                 - Pointer to index used in vring desc table
 * @param len                       - Length of buffer
 *
 * @return                          - Pointer to available buffer
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
				     uint32_t *len)
{
	uint16_t head_idx = 0;
	void *buffer;

	atomic_thread_fence(memory_order_seq_cst);
	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return NULL;
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
	*avail_idx = vq->vq_ring.avail->ring[head_idx];

	buffer = virtqueue_phys_to_virt(vq, vq->vq_ring.desc[*avail_idx].addr);
	*len = vq->vq_ring.desc[*avail_idx].len;

	VQUEUE_IDLE(vq);

	return buffer;
}

/**
 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
 *
 * @param vq                     - Pointer to VirtIO queue control block
 * @param head_idx               - Index of vring desc containing used buffer
 * @param len                    - Length of buffer
 *
 * @return                       - Function status
 */
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
				  uint32_t len)
{
	struct vring_used_elem *used_desc = NULL;
	uint16_t used_idx;

	if (head_idx >= vq->vq_nentries) {
		return ERROR_VRING_NO_BUFF;
	}

	VQUEUE_BUSY(vq);

	used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
	used_desc = &vq->vq_ring.used->ring[used_idx];
	used_desc->id = head_idx;
	used_desc->len = len;

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.used->idx++;

	VQUEUE_IDLE(vq);

	return VQUEUE_SUCCESS;
}
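
/*
 * Usage sketch (hypothetical device-side loop): take the next buffer the
 * driver made available, consume it, return it on the used ring and
 * notify. process_rx() is an application-provided placeholder.
 *
 *	uint16_t idx;
 *	uint32_t len;
 *	void *buf;
 *
 *	buf = virtqueue_get_available_buffer(vq, &idx, &len);
 *	if (buf) {
 *		process_rx(buf, len);
 *		virtqueue_add_consumed_buffer(vq, idx, len);
 *		virtqueue_kick(vq);
 *	}
 */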

/**
 * virtqueue_enable_cb  - Enables callback generation
 *
 * @param vq            - Pointer to VirtIO queue control block
 *
 * @return              - Function status
 */
int virtqueue_enable_cb(struct virtqueue *vq)
{
	return vq_ring_enable_interrupt(vq, 0);
}
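
/*
 * Usage sketch (hypothetical): because buffers may be consumed between
 * draining the used ring and re-enabling callbacks, a non-zero return
 * value asks the caller to poll once more instead of sleeping.
 *
 *	for (;;) {
 *		drain_used_ring(vq);	(application-defined)
 *		if (!virtqueue_enable_cb(vq))
 *			break;
 *		virtqueue_disable_cb(vq);
 *	}
 */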

/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq           - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
	VQUEUE_BUSY(vq);

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) =
		    vq->vq_used_cons_idx - vq->vq_nentries - 1;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}

	VQUEUE_IDLE(vq);
}

/**
 * virtqueue_kick - Notifies the other side that a buffer is available for it.
 *
 * @param vq      - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{
	VQUEUE_BUSY(vq);

	/* Ensure updated avail->idx is visible to host. */
	atomic_thread_fence(memory_order_seq_cst);

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);

	vq->vq_queued_cnt = 0;

	VQUEUE_IDLE(vq);
}

/**
 * virtqueue_dump - Dumps important virtqueue fields; use for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_dump(struct virtqueue *vq)
{
	if (!vq)
		return;

	metal_log(METAL_LOG_DEBUG,
		  "VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
		  "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
		  "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
		  vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
		  virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
		  vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
		  vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
		  vq->vq_ring.used->flags);
}

/**
 * virtqueue_get_desc_size - Returns the size of the next available buffer
 *
 * @param vq            - Pointer to VirtIO queue control block
 *
 * @return              - Length of the buffer in the next available
 *                        descriptor, or 0 if none is available
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
	uint16_t head_idx = 0;
	uint16_t avail_idx = 0;
	uint32_t len = 0;

	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return 0;
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);
	avail_idx = vq->vq_ring.avail->ring[head_idx];
	len = vq->vq_ring.desc[avail_idx].len;

	VQUEUE_IDLE(vq);

	return len;
}

/**************************************************************************
 *                            Helper Functions                            *
 **************************************************************************/

/**
 *
 * vq_ring_add_buffer
 *
 */
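/*
 * For example, with readable = 2 and writable = 1 the function below
 * builds a three-descriptor chain: the first two descriptors carry
 * VRING_DESC_F_NEXT only, while the last carries VRING_DESC_F_WRITE and
 * terminates the chain.
 */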
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
				   struct vring_desc *desc, uint16_t head_idx,
				   struct virtqueue_buf *buf_list, int readable,
				   int writable)
{
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	(void)vq;

	needed = readable + writable;

	for (i = 0, idx = head_idx; i < needed; i++, idx = dp->next) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
			 "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = virtqueue_virt_to_phys(vq, buf_list[i].buf);
		dp->len = buf_list[i].len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/*
		 * Readable buffers are inserted into vring before the
		 * writable buffers.
		 */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

/**
 *
 * vq_ring_free_chain
 *
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0) {
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	}

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, (dxp->ndescs == 0),
		 "failed to free entire desc chain, remaining");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}

/**
 *
 * vq_ring_init
 *
 */
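/*
 * For example, with a 4-entry ring the loop below links the descriptors
 * into a free list: desc[0].next = 1, desc[1].next = 2, desc[2].next = 3
 * and desc[3].next = VQ_RING_DESC_CHAIN_END.
 */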
static void vq_ring_init(struct virtqueue *vq, void *ring_mem, int alignment)
{
	struct vring *vr;
	int i, size;

	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, (unsigned char *)ring_mem, alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

/**
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

/**
 *
 * vq_ring_enable_interrupt
 *
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	} else {
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	atomic_thread_fence(memory_order_seq_cst);

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc) {
		return 1;
	}

	return 0;
}

/**
 *
 * virtqueue_notification
 *
 */
void virtqueue_notification(struct virtqueue *vq)
{
	atomic_thread_fence(memory_order_seq_cst);
	if (vq->callback)
		vq->callback(vq);
}

/**
 *
 * vq_ring_must_notify_host
 *
 */
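/*
 * When VIRTQUEUE_FLAG_EVENT_IDX is set, vring_need_event() implements the
 * standard virtio event-index test: notify only if the peer's event index
 * was crossed by the entries published since the last kick. For example,
 * with prev_idx = 4 and new_idx = 7, an event_idx of 5 or 6 triggers a
 * notification, while an event_idx of 8 does not.
 */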
static int vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

/**
 *
 * vq_ring_notify_host
 *
 */
static void vq_ring_notify_host(struct virtqueue *vq)
{
	if (vq->notify)
		vq->notify(vq);
}

/**
 *
 * virtqueue_nused
 *
 */
static int virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return nused;
}