/*
 * Copyright (c) 2024 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/virtio/virtqueue.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/barrier.h>
#include <errno.h>
#include <string.h>

LOG_MODULE_REGISTER(virtio, CONFIG_VIRTIO_LOG_LEVEL);

/*
 * Based on Virtual I/O Device (VIRTIO) Version 1.3 specification:
 * https://docs.oasis-open.org/virtio/virtio/v1.3/csd01/virtio-v1.3-csd01.pdf
 */

/*
 * The maximum queue size is 2^15 (see spec 2.7), so any 16-bit value larger
 * than that can be used as a sentinel in the next field
 */
#define VIRTQ_DESC_NEXT_SENTINEL 0xffff

/* According to spec 2.7.5.2 the total length of a descriptor chain must not exceed 4GB */
#define MAX_DESCRIPTOR_CHAIN_LENGTH ((uint64_t)1 << 32)

int virtq_create(struct virtq *v, size_t size)
{
	__ASSERT(IS_POWER_OF_TWO(size), "size of virtqueue must be a power of 2");
	__ASSERT(size <= KB(32), "size of virtqueue must be at most 32KB");
	/*
	 * For sizes and alignments see the table in spec 2.7. We support only modern
	 * virtio, so we don't have to adhere to the additional constraints from spec 2.7.2
	 */
	size_t descriptor_table_size = 16 * size;
	size_t available_ring_size = 2 * size + 6;
	/*
	 * the used ring must be aligned to 4 bytes (spec 2.7); the combined size of
	 * the two preceding regions is always even, so (offset % 4) is the padding needed
	 */
	size_t used_ring_pad = (descriptor_table_size + available_ring_size) % 4;
	size_t used_ring_size = 8 * size + 6;
	size_t shared_size =
		descriptor_table_size + available_ring_size + used_ring_pad + used_ring_size;
	size_t v_size = shared_size + sizeof(struct virtq_receive_callback_entry) * size;
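
	/*
	 * Worked example: for size = 16 the descriptor table takes 16 * 16 = 256
	 * bytes, the available ring 2 * 16 + 6 = 38 bytes, the used ring pad
	 * (256 + 38) % 4 = 2 bytes and the used ring 8 * 16 + 6 = 134 bytes, so
	 * the shared region is 256 + 38 + 2 + 134 = 430 bytes, followed by
	 * 16 receive callback entries
	 */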

	uint8_t *v_area = k_aligned_alloc(16, v_size);

	if (!v_area) {
		LOG_ERR("unable to allocate virtqueue");
		return -ENOMEM;
	}

	v->num = size;
	v->desc = (struct virtq_desc *)v_area;
	v->avail = (struct virtq_avail *)((uint8_t *)v->desc + descriptor_table_size);
	v->used = (struct virtq_used *)((uint8_t *)v->avail + available_ring_size + used_ring_pad);
	v->recv_cbs = (struct virtq_receive_callback_entry *)((uint8_t *)v->used + used_ring_size);

	/*
	 * Initially, the descriptor table, the available ring and the used ring have
	 * to be set to zero. This is the case for both PCI (4.1.5.1.3) and MMIO
	 * (4.2.3.2) transport options. It's unspecified for channel I/O (chapter 4.3),
	 * but that transport is used on platforms not supported by Zephyr, so we
	 * don't have to handle it here
	 */
	memset(v_area, 0, v_size);

	v->last_used_idx = 0;

	int ret = k_stack_alloc_init(&v->free_desc_stack, size);

	if (ret != 0) {
		LOG_ERR("unable to allocate free descriptor stack");
		k_free(v_area);
		return ret;
	}

	for (uint16_t i = 0; i < size; i++) {
		k_stack_push(&v->free_desc_stack, i);
	}
	v->free_desc_n = size;

	return 0;
}
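
/*
 * Example usage (illustrative sketch, not part of this driver): a virtio
 * device driver typically creates one virtqueue per queue index exposed by
 * the transport, e.g.:
 *
 *	struct virtq vq;
 *
 *	if (virtq_create(&vq, 64) == 0) {	// 64: a power of 2, at most 2^15
 *		// ... register vq with the device through the transport driver ...
 *		virtq_free(&vq);
 *	}
 */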

void virtq_free(struct virtq *v)
{
	k_free(v->desc);
	k_stack_cleanup(&v->free_desc_stack);
}
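
/*
 * The available ring is a circular buffer of descriptor chain heads: the
 * driver writes the new head at slot (idx % num) and only then increments
 * idx, while the device consumes entries up to idx. For example, with
 * num = 4 and idx = 5, the next head is written to slot 5 % 4 = 1 and idx
 * becomes 6.
 */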

static int virtq_add_available(struct virtq *v, uint16_t desc_idx)
{
	/* the ring index is computed in CPU order; only stored values are little-endian */
	uint16_t new_idx = sys_le16_to_cpu(v->avail->idx) % v->num;

	v->avail->ring[new_idx] = sys_cpu_to_le16(desc_idx);
	/* make sure the device sees the new ring entry before the updated index */
	barrier_dmem_fence_full();
	v->avail->idx = sys_cpu_to_le16(sys_le16_to_cpu(v->avail->idx) + 1);

	return 0;
}

int virtq_add_buffer_chain(
	struct virtq *v, struct virtq_buf *bufs, uint16_t bufs_size,
	uint16_t device_readable_count, virtq_receive_callback cb, void *cb_opaque,
	k_timeout_t timeout)
{
	uint64_t total_len = 0;

	for (int i = 0; i < bufs_size; i++) {
		total_len += bufs[i].len;
	}

	if (total_len > MAX_DESCRIPTOR_CHAIN_LENGTH) {
		LOG_ERR("buffer chain is longer than 2^32 bytes");
		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&v->lock);

	if (v->free_desc_n < bufs_size && !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		/* we don't have enough free descriptors to push all buffers to the queue */
		k_spin_unlock(&v->lock, key);
		return -EBUSY;
	}

	uint16_t prev_desc = VIRTQ_DESC_NEXT_SENTINEL;
	uint16_t head = VIRTQ_DESC_NEXT_SENTINEL;

	for (uint16_t buf_n = 0; buf_n < bufs_size; buf_n++) {
		uint16_t desc;

		/*
		 * we've checked above that we have enough free descriptors, or we are
		 * allowed to wait for them (K_FOREVER), so popping from the stack is
		 * guaranteed to succeed and we don't have to check its return value
		 */
		virtq_get_free_desc(v, &desc, timeout);

		if (head == VIRTQ_DESC_NEXT_SENTINEL) {
			head = desc;
		}

		/* descriptor fields are stored little-endian */
		uint16_t flags = 0;

		v->desc[desc].addr = sys_cpu_to_le64(k_mem_phys_addr(bufs[buf_n].addr));
		v->desc[desc].len = sys_cpu_to_le32(bufs[buf_n].len);
		if (buf_n >= device_readable_count) {
			/* buffers after the device-readable ones are device-writable */
			flags |= VIRTQ_DESC_F_WRITE;
		}
		if (buf_n < bufs_size - 1) {
			flags |= VIRTQ_DESC_F_NEXT;
		} else {
			v->desc[desc].next = 0;
		}
		v->desc[desc].flags = sys_cpu_to_le16(flags);

		if (prev_desc != VIRTQ_DESC_NEXT_SENTINEL) {
			v->desc[prev_desc].next = sys_cpu_to_le16(desc);
		}

		prev_desc = desc;
	}

	v->recv_cbs[head].cb = cb;
	v->recv_cbs[head].opaque = cb_opaque;

	virtq_add_available(v, head);

	k_spin_unlock(&v->lock, key);

	return 0;
}
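
/*
 * Example usage (illustrative sketch; req, resp and recv_cb are hypothetical,
 * and we assume virtq_receive_callback receives the opaque pointer and the
 * number of bytes written by the device):
 *
 *	static void recv_cb(void *opaque, uint32_t used_len)
 *	{
 *		// the device consumed the chain; resp holds up to used_len bytes
 *	}
 *
 *	struct virtq_buf bufs[2] = {
 *		{ .addr = req, .len = req_len },	// device-readable part
 *		{ .addr = resp, .len = resp_len }	// device-writable part
 *	};
 *
 *	// device_readable_count == 1: only the first buffer is device-readable
 *	int ret = virtq_add_buffer_chain(&vq, bufs, 2, 1, recv_cb, NULL, K_NO_WAIT);
 */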

int virtq_get_free_desc(struct virtq *v, uint16_t *desc_idx, k_timeout_t timeout)
{
	stack_data_t desc;

	int ret = k_stack_pop(&v->free_desc_stack, &desc, timeout);

	if (ret == 0) {
		*desc_idx = (uint16_t)desc;
		v->free_desc_n--;
	}

	return ret;
}

void virtq_add_free_desc(struct virtq *v, uint16_t desc_idx)
{
	k_stack_push(&v->free_desc_stack, desc_idx);
	v->free_desc_n++;
}
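
/*
 * Example usage (illustrative sketch): after the device marks a chain as
 * used, the transport driver can walk the chain from its head and return
 * every descriptor to the free stack, e.g.:
 *
 *	uint16_t desc = head;
 *	bool last = false;
 *
 *	while (!last) {
 *		uint16_t next = sys_le16_to_cpu(v->desc[desc].next);
 *
 *		last = !(sys_le16_to_cpu(v->desc[desc].flags) & VIRTQ_DESC_F_NEXT);
 *		virtq_add_free_desc(v, desc);
 *		desc = next;
 *	}
 */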