/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP protocol.
 *
 * Version:	@(#)udp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _LINUX_UDP_H
#define _LINUX_UDP_H

#include <net/inet_sock.h>
#include <linux/skbuff.h>
#include <net/netns/hash.h>
#include <uapi/linux/udp.h>

static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	return (struct udphdr *)skb_transport_header(skb);
}

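/*
 * Example (illustrative sketch, not part of this header): once the transport
 * header offset is set on a received skb, udp_hdr() gives direct access to
 * the ports and datagram length.  The helper name my_udp_dump() is
 * hypothetical.
 *
 *	static void my_udp_dump(const struct sk_buff *skb)
 *	{
 *		const struct udphdr *uh = udp_hdr(skb);
 *
 *		pr_debug("udp %u -> %u, len %u\n",
 *			 ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len));
 *	}
 */
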
#define UDP_HTABLE_SIZE_MIN_PERNET	128
#define UDP_HTABLE_SIZE_MIN		(IS_ENABLED(CONFIG_BASE_SMALL) ? 128 : 256)
#define UDP_HTABLE_SIZE_MAX		65536

static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
	return (num + net_hash_mix(net)) & mask;
}

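/*
 * Example (illustrative sketch): the port-based lookup path picks a hash
 * slot with something like udp_hashfn(net, ntohs(uh->dest), udptable->mask).
 * Mixing in the per-netns value from net_hash_mix() makes slot distribution
 * differ between network namespaces.
 */
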
enum {
	UDP_FLAGS_CORK,			/* Cork is required */
	UDP_FLAGS_NO_CHECK6_TX,		/* Send zero UDP6 checksums on TX? */
	UDP_FLAGS_NO_CHECK6_RX,		/* Allow zero UDP6 checksums on RX? */
	UDP_FLAGS_GRO_ENABLED,		/* Request GRO aggregation */
	UDP_FLAGS_ACCEPT_FRAGLIST,	/* Accept SKB_GSO_FRAGLIST packets */
	UDP_FLAGS_ACCEPT_L4,		/* Accept SKB_GSO_UDP_L4 packets */
	UDP_FLAGS_ENCAP_ENABLED,	/* This socket enabled encap */
	UDP_FLAGS_UDPLITE_SEND_CC,	/* set via udplite setsockopt */
	UDP_FLAGS_UDPLITE_RECV_CC,	/* set via udplite setsockopt */
};

struct udp_sock {
	/* inet_sock has to be the first member */
	struct inet_sock inet;
#define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node

	unsigned long	 udp_flags;

	int		 pending;	/* Any pending frames? */
	__u8		 encap_type;	/* Is this an encapsulation socket? */

#if !IS_ENABLED(CONFIG_BASE_SMALL)
	/* For UDP 4-tuple hash */
	__u16 udp_lrpa_hash;
	struct hlist_nulls_node udp_lrpa_node;
#endif

	/*
	 * Following member retains the information to create a UDP header
	 * when the socket is uncorked.
	 */
	__u16		 len;		/* total length of pending frames */
	__u16		 gso_size;
	/*
	 * Fields specific to UDP-Lite.
	 */
	__u16		 pcslen;
	__u16		 pcrlen;
	/*
	 * For encapsulation sockets.
	 */
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, int err,
			      __be16 port, u32 info, u8 *payload);
	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
	void (*encap_destroy)(struct sock *sk);

	/* GRO functions for UDP socket */
	struct sk_buff *	(*gro_receive)(struct sock *sk,
					       struct list_head *head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sock *sk,
						struct sk_buff *skb,
						int nhoff);

	/* udp_recvmsg() tries to use this before splicing sk_receive_queue */
	struct sk_buff_head	reader_queue ____cacheline_aligned_in_smp;

	/* This field is dirtied by udp_recvmsg() */
	int		forward_deficit;

	/* This field follows the rcvbuf value, and is touched by udp_recvmsg() */
	int		forward_threshold;

	/* Cache-friendly copy of sk->sk_peek_off >= 0 */
	bool		peeking_with_offset;

	/*
	 * Accounting for the tunnel GRO fastpath.
	 * Not protected by a compile-time guard, as it uses space available
	 * in the last UDP socket cacheline.
	 */
	struct hlist_node	tunnel_list;
};

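/*
 * Example (illustrative sketch): the encap_* and gro_* callbacks above are
 * normally not assigned by hand.  A tunnel driver fills a struct
 * udp_tunnel_sock_cfg and calls setup_udp_tunnel_sock() (see
 * include/net/udp_tunnel.h), which also enables udp_encap_needed_key.
 * my_tunnel_rcv(), my_tunnel_priv and the encap type value are hypothetical.
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.sk_user_data	= my_tunnel_priv,
 *		.encap_type	= 1,
 *		.encap_rcv	= my_tunnel_rcv,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 */
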
#define udp_test_bit(nr, sk)			\
	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_set_bit(nr, sk)			\
	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_test_and_set_bit(nr, sk)		\
	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_clear_bit(nr, sk)			\
	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_assign_bit(nr, sk, val)		\
	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)

#define UDP_MAX_SEGMENTS	(1 << 7UL)

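/*
 * Example (illustrative sketch, user space): gso_size above is set via the
 * UDP_SEGMENT socket option; a single send is then segmented into
 * gso_size-byte datagrams, at most UDP_MAX_SEGMENTS of them per call.
 *
 *	int gso = 1400;
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 16 * 1400, 0);
 */
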
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)

static inline int udp_set_peek_off(struct sock *sk, int val)
{
	sk_set_peek_off(sk, val);
	WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
	return 0;
}

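/*
 * Example (illustrative sketch, user space): this hook backs the SO_PEEK_OFF
 * socket option, which lets MSG_PEEK reads start at a stored offset that
 * advances with each peek instead of always starting at zero.
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 128, MSG_PEEK);
 */
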
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_TX, sk, val);
}

static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_RX, sk, val);
}

static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_TX, sk);
}

static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
	return udp_test_bit(NO_CHECK6_RX, sk);
}

static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
				 struct sk_buff *skb)
{
	int gso_size;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		gso_size = skb_shinfo(skb)->gso_size;
		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
	}
}

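/*
 * Example (illustrative sketch, user space): a receiver that enables UDP_GRO
 * gets the segment size delivered by the cmsg above and can use it to split
 * an aggregated buffer back into individual datagrams.  Assumes msg was
 * prepared elsewhere with a control buffer.
 *
 *	int one = 1, gso_size = 0;
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, SOL_UDP, UDP_GRO, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_UDP && cm->cmsg_type == UDP_GRO)
 *			memcpy(&gso_size, CMSG_DATA(cm), sizeof(gso_size));
 */
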
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif

static inline bool udp_encap_needed(void)
{
	if (static_branch_unlikely(&udp_encap_needed_key))
		return true;

#if IS_ENABLED(CONFIG_IPV6)
	if (static_branch_unlikely(&udpv6_encap_needed_key))
		return true;
#endif

	return false;
}

static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return false;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
	    !udp_test_bit(ACCEPT_L4, sk))
		return true;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
		return true;

	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
	 * land in a tunnel as the socket check in udp_gro_receive cannot be
	 * foolproof.
	 */
	if (udp_encap_needed() &&
	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
	    !(skb_shinfo(skb)->gso_type &
	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
		return true;

	return false;
}

static inline void udp_allow_gso(struct sock *sk)
{
	udp_set_bit(ACCEPT_L4, sk);
	udp_set_bit(ACCEPT_FRAGLIST, sk);
}

#define udp_portaddr_for_each_entry(__sk, list) \
	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)

#define udp_portaddr_for_each_entry_from(__sk) \
	hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)

#define udp_portaddr_for_each_entry_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)

#if !IS_ENABLED(CONFIG_BASE_SMALL)
#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
	hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif

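/*
 * Example (illustrative sketch): walking a port/address hash chain under RCU
 * with the iterator above.  "head" stands for a chain head taken from the
 * secondary (port+address) UDP hash table and "hnum" for the local port
 * being looked up; both are placeholders.
 *
 *	rcu_read_lock();
 *	udp_portaddr_for_each_entry_rcu(sk, head) {
 *		if (inet_sk(sk)->inet_num == hnum)
 *			break;
 *	}
 *	rcu_read_unlock();
 */
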
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)

static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
	return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
#else
	return NULL;
#endif
}

#endif	/* _LINUX_UDP_H */