1 #ifndef VIRTQUEUE_H_
2 #define VIRTQUEUE_H_
3 
4 /*-
5  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
6  * All rights reserved.
7  *
8  * SPDX-License-Identifier: BSD-2-Clause
9  *
10  * $FreeBSD$
11  */
12 
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
15 
16 #if defined __cplusplus
17 extern "C" {
18 #endif
19 
/* Legacy 8-bit boolean kept for API compatibility; <stdbool.h> is already
 * included above, so new code should prefer plain bool.
 */
typedef uint8_t boolean;
21 
22 #include <openamp/virtio_ring.h>
23 #include <metal/alloc.h>
24 #include <metal/io.h>
25 
/* Error codes: every virtqueue error is a negative offset from
 * VQ_ERROR_BASE so the range does not collide with other subsystems.
 * VQ_ERROR_BASE is parenthesized so the negative literal cannot be
 * mis-combined with adjacent tokens when the macro is expanded.
 */
#define VQ_ERROR_BASE                                 (-3000)
#define ERROR_VRING_FULL                              (VQ_ERROR_BASE - 1)
#define ERROR_INVLD_DESC_IDX                          (VQ_ERROR_BASE - 2)
#define ERROR_EMPTY_RING                              (VQ_ERROR_BASE - 3)
#define ERROR_NO_MEM                                  (VQ_ERROR_BASE - 4)
#define ERROR_VRING_MAX_DESC                          (VQ_ERROR_BASE - 5)
#define ERROR_VRING_ALIGN                             (VQ_ERROR_BASE - 6)
#define ERROR_VRING_NO_BUFF                           (VQ_ERROR_BASE - 7)
#define ERROR_VQUEUE_INVLD_PARAM                      (VQ_ERROR_BASE - 8)

/* Success return value for virtqueue APIs. */
#define VQUEUE_SUCCESS                                0
38 
/* The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END                         32768
/* Internal vq_flags bits (see struct virtqueue). */
#define VIRTQUEUE_FLAG_INDIRECT                        0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX                       0x0002
/* Maximum virtqueue name length — presumably includes the NUL terminator;
 * confirm against the implementation in virtqueue.c.
 */
#define VIRTQUEUE_MAX_NAME_SZ                          32

/* Support for indirect buffer descriptors. */
#define VIRTIO_RING_F_INDIRECT_DESC    (1 << 28)

/* Support to suppress interrupt until specific index is reached. */
#define VIRTIO_RING_F_EVENT_IDX        (1 << 29)
54 
/* One buffer element (address + length) passed to virtqueue_add_buffer(). */
struct virtqueue_buf {
	void *buf;	/* virtual address of the buffer */
	int len;	/* buffer length in bytes */
};
59 
/*
 * Run-time state of one virtio queue.  Instances are sized by
 * virtqueue_allocate(), which reserves extra room after the struct for
 * the trailing vq_descx[] array.
 */
struct virtqueue {
	struct virtio_device *vq_dev;	/* device this queue belongs to */
	const char *vq_name;		/* queue name; printed by debug asserts */
	uint16_t vq_queue_index;	/* queue id passed to virtqueue_create() */
	uint16_t vq_nentries;		/* ring size (number of descriptors) */
	uint32_t vq_flags;		/* VIRTQUEUE_FLAG_* bits */
	void (*callback)(struct virtqueue *vq);	/* activity callback hook */
	void (*notify)(struct virtqueue *vq);	/* hook to kick the other side */
	struct vring vq_ring;		/* desc/avail/used ring layout */
	uint16_t vq_free_cnt;		/* count of free descriptors */
	uint16_t vq_queued_cnt;
	void *shm_io; /* opaque pointer to data needed to allow v2p & p2v */

	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t vq_desc_head_idx;

	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t vq_used_cons_idx;

	/*
	 * Last consumed descriptor in the available table -
	 * used by the consumer side.
	 */
	uint16_t vq_available_idx;

#ifdef VQUEUE_DEBUG
	boolean vq_inuse;	/* re-entrancy guard; see VQUEUE_BUSY/VQUEUE_IDLE */
#endif

	/*
	 * Used by the host side during callback. Cookie
	 * holds the address of buffer received from other side.
	 * Other fields in this structure are not used currently.
	 */

	struct vq_desc_extra {
		void *cookie;	/* cookie given to virtqueue_add_buffer() */
		uint16_t ndescs;	/* presumably chain length — confirm in virtqueue.c */
	} vq_descx[0];	/* zero-length tail; real count fixed at allocation */
};
107 
108 /* struct to hold vring specific information */
/* Description of a caller-supplied vring buffer, consumed by
 * virtqueue_create().
 */
struct vring_alloc_info {
	void *vaddr;		/* base virtual address of the vring memory */
	uint32_t align;		/* vring alignment */
	uint16_t num_descs;	/* number of descriptors in the ring */
	uint16_t pad;		/* explicit structure padding */
};
115 
/* Function signatures for the per-queue callback and notify hooks. */
typedef void vq_callback(struct virtqueue *);
typedef void vq_notify(struct virtqueue *);
118 
119 #ifdef VQUEUE_DEBUG
120 #include <metal/log.h>
121 #include <metal/assert.h>
122 
/*
 * Debug assertion: if _exp is false, log the failing queue's name and the
 * message, then hard-assert.  _msg must be a string literal — it is pasted
 * onto the format string by literal concatenation ("%s: %s - " _msg).
 * (With _msg inside the quotes the literal text "_msg" would be printed
 * instead of the actual message.)
 */
#define VQASSERT(_vq, _exp, _msg) \
	do { \
		if (!(_exp)) { \
			metal_log(METAL_LOG_EMERGENCY, \
				  "%s: %s - " _msg, __func__, (_vq)->vq_name); \
			metal_assert(_exp); \
		} \
	} while (0)
131 
/* Assert that _idx is a valid descriptor index for this queue. */
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)            \
	VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, "invalid ring index")

/* Assert that an exhausted free list is terminated with
 * VQ_RING_DESC_CHAIN_END (see vq_desc_head_idx).
 */
#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                \
	VQASSERT((_vq), (_vq)->vq_desc_head_idx ==            \
	VQ_RING_DESC_CHAIN_END, \
	"full ring terminated incorrectly: invalid head")
139 
/* Parameter check: record status_err in status_var when condition holds,
 * but only if no earlier check already set an error (status_var still 0) —
 * so the first failure wins.
 */
#define VQ_PARAM_CHK(condition, status_var, status_err) \
	do {						\
		if (((status_var) == 0) && (condition)) { \
			status_var = status_err;        \
		}					\
	} while (0)
146 
/*
 * Debug-only re-entrancy guard: mark the queue busy on API entry and idle
 * on exit; entering an already-busy queue trips VQASSERT.  The ';' after
 * the VQASSERT() invocation is required — VQASSERT expands to a
 * do { } while (0) statement, and without the terminator the else branch
 * is ill-formed and the macro fails to compile under VQUEUE_DEBUG.
 */
#define VQUEUE_BUSY(vq) \
	do {						     \
		if (!(vq)->vq_inuse)                 \
			(vq)->vq_inuse = true;               \
		else                                         \
			VQASSERT(vq, !(vq)->vq_inuse,\
				"VirtQueue already in use"); \
	} while (0)

#define VQUEUE_IDLE(vq)            ((vq)->vq_inuse = false)
157 
158 #else
159 
/* Release builds: every debug check compiles away to nothing. */
#define KASSERT(cond, str)
#define VQASSERT(_vq, _exp, _msg)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
#define VQ_RING_ASSERT_CHAIN_TERM(_vq)
#define VQ_PARAM_CHK(condition, status_var, status_err)
#define VQUEUE_BUSY(vq)
#define VQUEUE_IDLE(vq)
167 
168 #endif
169 
/*
 * Initialize a virtqueue.
 *
 * @device   - virtio device the queue belongs to
 * @id       - queue index
 * @name     - queue name (stored in vq_name; printed by debug asserts)
 * @ring     - vring description: address, alignment, descriptor count
 * @callback - activity callback hook
 * @notify   - hook used to kick the other side
 * @v_queue  - caller-provided virtqueue storage to initialize
 *
 * NOTE(review): presumably returns VQUEUE_SUCCESS or a negative ERROR_*
 * code — confirm against the implementation in virtqueue.c.
 */
int virtqueue_create(struct virtio_device *device, unsigned short id,
		     const char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue *v_queue);
175 
176 /*
177  * virtqueue_set_shmem_io
178  *
179  * set virtqueue shared memory I/O region
180  *
181  * @vq - virt queue
182  * @io - pointer to the shared memory I/O region
183  */
virtqueue_set_shmem_io(struct virtqueue * vq,struct metal_io_region * io)184 static inline void virtqueue_set_shmem_io(struct virtqueue *vq,
185 					  struct metal_io_region *io)
186 {
187 	vq->shm_io = io;
188 }
189 
/*
 * Enqueue the buffers of buf_list ('readable' entries, then 'writable'
 * entries) as one chain and associate 'cookie' with it.
 * NOTE(review): presumably returns VQUEUE_SUCCESS or a negative ERROR_*
 * code — confirm in virtqueue.c.
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
			 int readable, int writable, void *cookie);

/*
 * Fetch the next consumed buffer, returning the cookie that was passed to
 * virtqueue_add_buffer(); the used length and descriptor index are stored
 * via len/idx.  NOTE(review): likely returns NULL when nothing is pending —
 * confirm in virtqueue.c.
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx);

/*
 * Consumer side: fetch the next available buffer, storing its head index
 * in *avail_idx and length in *len.
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
				     uint32_t *len);

/*
 * Consumer side: return the chain headed by head_idx to the used ring,
 * reporting 'len' bytes used.
 */
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
				  uint32_t len);

/* Suppress queue callbacks/interrupt requests. */
void virtqueue_disable_cb(struct virtqueue *vq);

/* Re-enable queue callbacks; return semantics defined in virtqueue.c. */
int virtqueue_enable_cb(struct virtqueue *vq);

/* Kick the other side via the queue's notify hook. */
void virtqueue_kick(struct virtqueue *vq);
206 
virtqueue_allocate(unsigned int num_desc_extra)207 static inline struct virtqueue *virtqueue_allocate(unsigned int num_desc_extra)
208 {
209 	struct virtqueue *vqs;
210 	uint32_t vq_size = sizeof(struct virtqueue) +
211 		 num_desc_extra * sizeof(struct vq_desc_extra);
212 
213 	vqs = (struct virtqueue *)metal_allocate_memory(vq_size);
214 
215 	if (vqs) {
216 		memset(vqs, 0x00, vq_size);
217 	}
218 
219 	return vqs;
220 }
221 
/* Release a queue obtained from virtqueue_allocate(). */
void virtqueue_free(struct virtqueue *vq);

/* Debug helper: dump queue state; output format defined in virtqueue.c. */
void virtqueue_dump(struct virtqueue *vq);

/* Notification entry point for queue activity — presumably dispatches to
 * the queue's 'callback' hook; confirm in virtqueue.c.
 */
void virtqueue_notification(struct virtqueue *vq);

/* NOTE(review): descriptor-size query — exact semantics (per-descriptor vs
 * whole table) live in virtqueue.c.
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq);

/* Length of the buffer associated with descriptor 'idx'. */
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx);
231 
232 #if defined __cplusplus
233 }
234 #endif
235 
236 #endif				/* VIRTQUEUE_H_ */
237