/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the
	 * consumer pointer when the producer pointer is touched, and
	 * vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                                  consumer
 *
 * if (LOAD ->consumer) {    (A)             LOAD.acq ->producer     (C)
 *    STORE $data                            LOAD $data
 *    STORE.rel ->producer   (B)             STORE.rel ->consumer    (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that the data is written before the
 * producer pointer is updated. If this barrier were missing, the
 * consumer could observe the producer pointer being set and thus load
 * the data before the producer has written the new data. The consumer
 * would in this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we did not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded since the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. The dependency
 * will order both of the stores after the load of ->consumer. So no
 * barrier is needed.
 *
 * (D) ensures that the load of the data is observed to happen before
 * the store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
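
/* As a minimal illustrative sketch (not part of this header), the pairing
 * above maps onto the kernel barrier primitives roughly like this for a
 * hypothetical single-entry ring with fields prod, cons and data:
 *
 *	// producer
 *	if (ring->prod == READ_ONCE(ring->cons)) {		// (A) room check
 *		ring->data = value;				// STORE $data
 *		smp_store_release(&ring->prod, ring->prod + 1);	// (B)
 *	}
 *
 *	// consumer
 *	if (smp_load_acquire(&ring->prod) != ring->cons) {	// (C)
 *		value = ring->data;				// LOAD $data
 *		smp_store_release(&ring->cons, ring->cons + 1);	// (D)
 *	}
 */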

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
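
/* A brief sketch of how these operations map onto the helpers below; the
 * real call sites live in xsk.c and the zero-copy drivers, so this is
 * illustrative only:
 *
 *	// kernel as producer, e.g. posting a completion
 *	if (xskq_prod_reserve_addr(cq, addr))		// RESERVE + WRITE
 *		return -ENOSPC;				// ring full
 *	xskq_prod_submit(cq);				// SUBMIT
 *
 *	// kernel as consumer, e.g. draining the Tx ring
 *	while (xskq_cons_peek_desc(tx, &desc, pool)) {	// PEEK + READ
 *		... send desc ...
 *		xskq_cons_release(tx);			// RELEASE (local only)
 *	}
 *	__xskq_cons_release(tx);	// publish ->consumer to user space
 */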

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
					    u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return nb_entries;
}
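
/* Hypothetical use of the batch read above: a zero-copy driver can pull up
 * to @max descriptors into pool->tx_descs in one call and transmit them
 * before publishing the consumer pointer:
 *
 *	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, budget);
 *	for (i = 0; i < nb_pkts; i++)
 *		... transmit pool->tx_descs[i] ...
 *	__xskq_cons_release(xs->tx);	// make the release globally visible
 */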

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}
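
/* For illustration: after a series of xskq_cons_release() calls, only
 * q->cached_cons has advanced; the producer sees the freed slots once the
 * deferred __xskq_cons_release() runs, e.g.:
 *
 *	xskq_cons_release(q);		// q->cached_cons++ only
 *	...
 *	__xskq_cons_release(q);		// smp_store_release() of ->consumer
 */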

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}
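
/* Sketch of the two-step Rx producer path (illustrative; the real call
 * sites are in xsk.c): a descriptor only becomes visible to user space
 * after xskq_prod_submit() publishes the producer pointer, and a
 * reservation can be undone with xskq_prod_cancel() on a later error:
 *
 *	if (xskq_prod_reserve_desc(rx, addr, len))
 *		return -ENOBUFS;		// ring full
 *	if (copy_fails)				// hypothetical error path
 *		xskq_prod_cancel(rx);		// undo the reservation
 *	else
 *		xskq_prod_submit(rx);		// smp_store_release() of ->producer
 */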

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);
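
/* Hypothetical setup of a descriptor (Rx/Tx) ring; nentries is expected to
 * be a power of two so that ring_mask = nentries - 1 can be used as an
 * index mask:
 *
 *	struct xsk_queue *q = xskq_create(2048, false);	// false: rxtx ring
 *
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	xskq_destroy(q);
 */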

#endif /* _LINUX_XSK_QUEUE_H */