/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_tc, CONFIG_NET_TC_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <string.h>

#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>

#include "net_private.h"
#include "net_stats.h"
#include "net_tc_mapping.h"

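/* When CONFIG_NET_TC_RX/TX_SKIP_FOR_HIGH_PRIO is enabled, the highest
 * priority traffic bypasses the traffic class queues. Account for that
 * bypass path as one extra "pseudo" queue when dividing the packet pool
 * into per-queue FIFO slots below.
 */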
#define TC_RX_PSEUDO_QUEUE (COND_CODE_1(CONFIG_NET_TC_RX_SKIP_FOR_HIGH_PRIO, (1), (0)))
#define NET_TC_RX_EFFECTIVE_COUNT (NET_TC_RX_COUNT + TC_RX_PSEUDO_QUEUE)

#if NET_TC_RX_EFFECTIVE_COUNT > 1
#define NET_TC_RX_SLOTS (CONFIG_NET_PKT_RX_COUNT / NET_TC_RX_EFFECTIVE_COUNT)
BUILD_ASSERT(NET_TC_RX_SLOTS > 0,
	     "Misconfiguration: There are more traffic classes than packets, "
	     "either increase CONFIG_NET_PKT_RX_COUNT or decrease "
	     "CONFIG_NET_TC_RX_COUNT or disable CONFIG_NET_TC_RX_SKIP_FOR_HIGH_PRIO");
#endif
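/* Worked example (illustrative values only): with CONFIG_NET_PKT_RX_COUNT
 * = 16 and NET_TC_RX_EFFECTIVE_COUNT = 4, each RX class may hold at most
 * 16 / 4 = 4 packets in its FIFO at a time. The TX packet pool is divided
 * the same way below.
 */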

#define TC_TX_PSEUDO_QUEUE (COND_CODE_1(CONFIG_NET_TC_TX_SKIP_FOR_HIGH_PRIO, (1), (0)))
#define NET_TC_TX_EFFECTIVE_COUNT (NET_TC_TX_COUNT + TC_TX_PSEUDO_QUEUE)

#if NET_TC_TX_EFFECTIVE_COUNT > 1
#define NET_TC_TX_SLOTS (CONFIG_NET_PKT_TX_COUNT / NET_TC_TX_EFFECTIVE_COUNT)
BUILD_ASSERT(NET_TC_TX_SLOTS > 0,
	     "Misconfiguration: There are more traffic classes than packets, "
	     "either increase CONFIG_NET_PKT_TX_COUNT or decrease "
	     "CONFIG_NET_TC_TX_COUNT or disable CONFIG_NET_TC_TX_SKIP_FOR_HIGH_PRIO");
#endif

#if NET_TC_RX_EFFECTIVE_COUNT > 1
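/* How many times the RX submit path yields and retries when all FIFO
 * slots of a class are taken, before the packet is dropped.
 */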
#define NET_TC_RETRY_CNT 1
#endif
/* Template for thread name. The "xx" is either "TX", denoting a transmit
 * thread, or "RX", denoting a receive thread. The "q[y]" denotes the
 * traffic class queue, where y is the traffic class id (0 to 7).
 */
#define MAX_NAME_LEN sizeof("xx_q[y]")

/* Stacks for the TX traffic class handler threads */
K_KERNEL_STACK_ARRAY_DEFINE(tx_stack, NET_TC_TX_COUNT,
			    CONFIG_NET_TX_STACK_SIZE);

/* Stacks for the RX traffic class handler threads */
K_KERNEL_STACK_ARRAY_DEFINE(rx_stack, NET_TC_RX_COUNT,
			    CONFIG_NET_RX_STACK_SIZE);

#if NET_TC_TX_COUNT > 0
static struct net_traffic_class tx_classes[NET_TC_TX_COUNT];
#endif

#if NET_TC_RX_COUNT > 0
static struct net_traffic_class rx_classes[NET_TC_RX_COUNT];
#endif

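/* Queue a packet to the given TX traffic class. If several effective
 * queues share the packet pool, a FIFO slot is first reserved, waiting at
 * most the given timeout. Returns NET_OK if the packet was queued and
 * NET_DROP otherwise.
 */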
enum net_verdict net_tc_try_submit_to_tx_queue(uint8_t tc, struct net_pkt *pkt,
					       k_timeout_t timeout)
{
#if NET_TC_TX_COUNT > 0
	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

#if NET_TC_TX_EFFECTIVE_COUNT > 1
	if (k_sem_take(&tx_classes[tc].fifo_slot, timeout) != 0) {
		return NET_DROP;
	}
#endif

	k_fifo_put(&tx_classes[tc].fifo, pkt);
	return NET_OK;
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
	ARG_UNUSED(timeout);
	return NET_DROP;
#endif
}

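/* Queue a packet to the given RX traffic class. This path may run in ISR
 * context, so the FIFO slot reservation never blocks: if no slot is free,
 * retry up to NET_TC_RETRY_CNT times (yielding between attempts when in
 * thread context) and drop the packet if that fails.
 */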
enum net_verdict net_tc_submit_to_rx_queue(uint8_t tc, struct net_pkt *pkt)
{
#if NET_TC_RX_COUNT > 0
#if NET_TC_RX_EFFECTIVE_COUNT > 1
	uint8_t retry_cnt = NET_TC_RETRY_CNT;
#endif
	net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());

#if NET_TC_RX_EFFECTIVE_COUNT > 1
	while (k_sem_take(&rx_classes[tc].fifo_slot, K_NO_WAIT) != 0) {
		if (k_is_in_isr() || retry_cnt == 0) {
			return NET_DROP;
		}

		retry_cnt--;
		/* Let a thread with the same priority run to try to
		 * reduce packet drops.
		 */
		k_yield();
	}
#endif

	k_fifo_put(&rx_classes[tc].fifo, pkt);
	return NET_OK;
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
	return NET_DROP;
#endif
}

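/* Map an 802.1Q packet priority to a TX traffic class; out-of-range
 * priorities fall back to best effort, the default suggested in 802.1Q.
 *
 * Typical use, as a sketch (the surrounding submit logic is illustrative,
 * not a fixed API contract):
 *
 *	uint8_t tc = net_tx_priority2tc(net_pkt_priority(pkt));
 *
 *	if (net_tc_try_submit_to_tx_queue(tc, pkt, K_NO_WAIT) == NET_DROP) {
 *		net_pkt_unref(pkt);
 *	}
 */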
int net_tx_priority2tc(enum net_priority prio)
{
#if NET_TC_TX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return tx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}

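/* RX counterpart of net_tx_priority2tc(): map an 802.1Q packet priority
 * to an RX traffic class.
 */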
int net_rx_priority2tc(enum net_priority prio)
{
#if NET_TC_RX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return rx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}

#if defined(CONFIG_NET_TC_THREAD_PRIO_CUSTOM)
#define BASE_PRIO_TX CONFIG_NET_TC_TX_THREAD_BASE_PRIO
#elif defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_TX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_TX (CONFIG_NET_TC_TX_COUNT - 1)
#endif

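/* Thread priority value for queue i (PRIO_TX() here, PRIO_RX() below).
 * A lower value passed to K_PRIO_COOP()/K_PRIO_PREEMPT() yields a higher
 * thread priority, so a higher traffic class index gets a more urgent
 * handler thread.
 */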
#define PRIO_TX(i, _) (BASE_PRIO_TX - i)

#if defined(CONFIG_NET_TC_THREAD_PRIO_CUSTOM)
#define BASE_PRIO_RX CONFIG_NET_TC_RX_THREAD_BASE_PRIO
#elif defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_RX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_RX (CONFIG_NET_TC_RX_COUNT - 1)
#endif

#define PRIO_RX(i, _) (BASE_PRIO_RX - i)

#if NET_TC_TX_COUNT > 0
/* Convert traffic class to thread priority */
static uint8_t tx_tc2thread(uint8_t tc)
{
	/* The initial implementation just maps the traffic class to a
	 * certain queue. If there are fewer queues than classes, then
	 * several classes map into one specific queue.
	 *
	 * A lower value in this table means a higher thread priority. The
	 * value is used as a parameter to K_PRIO_COOP() or K_PRIO_PREEMPT()
	 * which converts it to an actual thread priority.
	 *
	 * A higher traffic class value means a higher priority queue. This
	 * means that the thread_priorities[7] value should contain the
	 * highest priority for the TX queue handling thread.
	 *
	 * For example, if NET_TC_TX_COUNT = 8, which is the maximum number
	 * of traffic classes, then this priority array will contain the
	 * following values if preemptive priorities are used:
	 *	7, 6, 5, 4, 3, 2, 1, 0
	 * and
	 *	14, 13, 12, 11, 10, 9, 8, 7
	 * if cooperative priorities are used.
	 *
	 * These will then be converted to the following thread priorities
	 * if CONFIG_NET_TC_THREAD_COOPERATIVE is enabled:
	 *	-1, -2, -3, -4, -5, -6, -7, -8
	 *
	 * and if CONFIG_NET_TC_THREAD_PREEMPTIVE is enabled, the following
	 * thread priorities are used:
	 *	7, 6, 5, 4, 3, 2, 1, 0
	 *
	 * This means that the lowest traffic class will have the lowest
	 * thread priority: -1 with cooperative priorities and 7 with
	 * preemptive priorities.
	 */
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_TX_COUNT, PRIO_TX, (,))
	};

	BUILD_ASSERT(NET_TC_TX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if NET_TC_RX_COUNT > 0
/* Convert traffic class to thread priority */
static uint8_t rx_tc2thread(uint8_t tc)
{
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_RX_COUNT, PRIO_RX, (,))
	};

	BUILD_ASSERT(NET_TC_RX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if defined(CONFIG_NET_STATISTICS)
/* Fix up the traffic class statistics so that the "net stats" shell
 * command prints its output correctly.
 */
#if NET_TC_TX_COUNT > 0
static void tc_tx_stats_priority_setup(struct net_if *iface)
{
	int i;

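	/* Record which traffic class each of the eight 802.1Q priorities
	 * (0..7) maps to, so per-priority statistics are accounted in the
	 * right bucket.
	 */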
	for (i = 0; i < 8; i++) {
		net_stats_update_tc_sent_priority(iface, net_tx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_RX_COUNT > 0
static void tc_rx_stats_priority_setup(struct net_if *iface)
{
	int i;

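	/* Same per-priority fixup as on the TX side, for receive
	 * statistics.
	 */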
	for (i = 0; i < 8; i++) {
		net_stats_update_tc_recv_priority(iface, net_rx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
static void net_tc_tx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_tx_stats_priority_setup(iface);
}
#endif

#if NET_TC_RX_COUNT > 0
static void net_tc_rx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_rx_stats_priority_setup(iface);
}
#endif
#endif

#if NET_TC_RX_COUNT > 0
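/* Per-class RX handler thread: block until a packet appears in this
 * class's FIFO, release the FIFO slot the producer reserved, and hand the
 * packet to the RX processing path.
 */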
static void tc_rx_handler(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p3);

	struct k_fifo *fifo = p1;
#if NET_TC_RX_EFFECTIVE_COUNT > 1
	struct k_sem *fifo_slot = p2;
#else
	ARG_UNUSED(p2);
#endif
	struct net_pkt *pkt;

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

#if NET_TC_RX_EFFECTIVE_COUNT > 1
		k_sem_give(fifo_slot);
#endif

		net_process_rx_packet(pkt);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
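/* Per-class TX handler thread: the TX mirror of tc_rx_handler(), feeding
 * queued packets to the TX processing path.
 */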
static void tc_tx_handler(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p3);

	struct k_fifo *fifo = p1;
#if NET_TC_TX_EFFECTIVE_COUNT > 1
	struct k_sem *fifo_slot = p2;
#else
	ARG_UNUSED(p2);
#endif
	struct net_pkt *pkt;

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

#if NET_TC_TX_EFFECTIVE_COUNT > 1
		k_sem_give(fifo_slot);
#endif

		net_process_tx_packet(pkt);
	}
}
#endif

/* Create a FIFO for each traffic class we are using. All the network
 * traffic goes through these classes.
 */
void net_tc_tx_init(void)
{
#if NET_TC_TX_COUNT == 0
	NET_DBG("No %s thread created", "TX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_TX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_tx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_TX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

		thread_priority = tx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting TX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&tx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(tx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			"coop" : "preempt",
			priority);

		k_fifo_init(&tx_classes[i].fifo);

#if NET_TC_TX_EFFECTIVE_COUNT > 1
		k_sem_init(&tx_classes[i].fifo_slot, NET_TC_TX_SLOTS, NET_TC_TX_SLOTS);
#endif

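		/* Create the thread suspended (K_FOREVER start delay) so it
		 * can be named before k_thread_start() below lets it run.
		 */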
		tid = k_thread_create(&tx_classes[i].handler, tx_stack[i],
				      K_KERNEL_STACK_SIZEOF(tx_stack[i]),
				      tc_tx_handler,
				      &tx_classes[i].fifo,
#if NET_TC_TX_EFFECTIVE_COUNT > 1
				      &tx_classes[i].fifo_slot,
#else
				      NULL,
#endif
				      NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}

void net_tc_rx_init(void)
{
#if NET_TC_RX_COUNT == 0
	NET_DBG("No %s thread created", "RX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_RX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_rx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_RX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

		thread_priority = rx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting RX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&rx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(rx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			"coop" : "preempt",
			priority);

		k_fifo_init(&rx_classes[i].fifo);

#if NET_TC_RX_EFFECTIVE_COUNT > 1
		k_sem_init(&rx_classes[i].fifo_slot, NET_TC_RX_SLOTS, NET_TC_RX_SLOTS);
#endif

		tid = k_thread_create(&rx_classes[i].handler, rx_stack[i],
				      K_KERNEL_STACK_SIZEOF(rx_stack[i]),
				      tc_rx_handler,
				      &rx_classes[i].fifo,
#if NET_TC_RX_EFFECTIVE_COUNT > 1
				      &rx_classes[i].fifo_slot,
#else
				      NULL,
#endif
				      NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "rx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}