// SPDX-License-Identifier: GPL-2.0

#include <vmlinux.h>
#include "bpf_experimental.h"
#include "bpf_qdisc_common.h"

char _license[] SEC("license") = "GPL";

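/* Each queued skb is owned (as a kptr) by a list node so it can be linked
 * into a BPF list.
 */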
struct skb_node {
	struct sk_buff __kptr * skb;
	struct bpf_list_node node;
};

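/* Single global FIFO: a BPF list of skb nodes, protected by a spin lock in
 * the same resource group.
 */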
private(A) struct bpf_spin_lock q_fifo_lock;
private(A) struct bpf_list_head q_fifo __contains(skb_node, node);

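/* Set by the .init op so user space can verify that init ran. */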
bool init_called;

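/* .enqueue: drop the packet when the queue is at its limit; otherwise wrap
 * the skb in a node and append it to the tail of the FIFO.
 */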
SEC("struct_ops/bpf_fifo_enqueue")
int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
	     struct bpf_sk_buff_ptr *to_free)
{
	struct skb_node *skbn;
	u32 pkt_len;

	if (sch->q.qlen == sch->limit)
		goto drop;

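	/* Allocate a list node from the BPF allocator to carry the skb. */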
	skbn = bpf_obj_new(typeof(*skbn));
	if (!skbn)
		goto drop;

	pkt_len = qdisc_pkt_len(skb);

	sch->q.qlen++;
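	/*
	 * Move the skb into the freshly allocated node. The exchange normally
	 * returns NULL here; drop any old pointer defensively.
	 */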
	skb = bpf_kptr_xchg(&skbn->skb, skb);
	if (skb)
		bpf_qdisc_skb_drop(skb, to_free);

	bpf_spin_lock(&q_fifo_lock);
	bpf_list_push_back(&q_fifo, &skbn->node);
	bpf_spin_unlock(&q_fifo_lock);

	sch->qstats.backlog += pkt_len;
	return NET_XMIT_SUCCESS;
drop:
	bpf_qdisc_skb_drop(skb, to_free);
	return NET_XMIT_DROP;
}

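/* .dequeue: pop the oldest skb from the FIFO, free its list node, and update
 * backlog, bstats, and qlen before returning the skb.
 */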
SEC("struct_ops/bpf_fifo_dequeue")
struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
{
	struct bpf_list_node *node;
	struct sk_buff *skb = NULL;
	struct skb_node *skbn;

	bpf_spin_lock(&q_fifo_lock);
	node = bpf_list_pop_front(&q_fifo);
	bpf_spin_unlock(&q_fifo_lock);
	if (!node)
		return NULL;

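	/*
	 * Take the skb out of the node before freeing the node; the exchange
	 * transfers ownership of the kptr to us.
	 */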
	skbn = container_of(node, struct skb_node, node);
	skb = bpf_kptr_xchg(&skbn->skb, skb);
	bpf_obj_drop(skbn);
	if (!skb)
		return NULL;

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	bpf_qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

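/* .init: set a default queue limit and record that init was called. */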
SEC("struct_ops/bpf_fifo_init")
int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
	     struct netlink_ext_ack *extack)
{
	sch->limit = 1000;
	init_called = true;
	return 0;
}

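/* .reset: drain the FIFO, freeing every queued skb and its node, then zero
 * the queue length.
 */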
SEC("struct_ops/bpf_fifo_reset")
void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
{
	struct bpf_list_node *node;
	struct skb_node *skbn;
	int i;

	bpf_for(i, 0, sch->q.qlen) {
		struct sk_buff *skb = NULL;

		bpf_spin_lock(&q_fifo_lock);
		node = bpf_list_pop_front(&q_fifo);
		bpf_spin_unlock(&q_fifo_lock);

		if (!node)
			break;

		skbn = container_of(node, struct skb_node, node);
		skb = bpf_kptr_xchg(&skbn->skb, skb);
		if (skb)
			bpf_kfree_skb(skb);
		bpf_obj_drop(skbn);
	}
	sch->q.qlen = 0;
}

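/* .destroy: no extra per-qdisc state to release in this example. */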
SEC("struct_ops")
void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch)
{
}

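/* Register the ops above as a qdisc named "bpf_fifo". */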
SEC(".struct_ops")
struct Qdisc_ops fifo = {
	.enqueue   = (void *)bpf_fifo_enqueue,
	.dequeue   = (void *)bpf_fifo_dequeue,
	.init      = (void *)bpf_fifo_init,
	.reset     = (void *)bpf_fifo_reset,
	.destroy   = (void *)bpf_fifo_destroy,
	.id        = "bpf_fifo",
};