// SPDX-License-Identifier: GPL-2.0-only

#include <linux/if_arp.h>

#include <net/6lowpan.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>

#include "6lowpan_i.h"

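/* Dispatch values and prefix masks from the 6LoWPAN dispatch registry
 * (see the IANA link further down).  LOWPAN_DISPATCH_FIRST covers the
 * two most significant pattern bits (used for NALP and MESH checks),
 * LOWPAN_DISPATCH_FRAG_MASK the five bits that identify FRAG1/FRAGN.
 */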
#define LOWPAN_DISPATCH_FIRST		0xc0
#define LOWPAN_DISPATCH_FRAG_MASK	0xf8

#define LOWPAN_DISPATCH_NALP		0x00
#define LOWPAN_DISPATCH_ESC		0x40
#define LOWPAN_DISPATCH_HC1		0x42
#define LOWPAN_DISPATCH_DFF		0x43
#define LOWPAN_DISPATCH_BC0		0x50
#define LOWPAN_DISPATCH_MESH		0x80

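/* Hand a fully decompressed IPv6 skb up to the network stack on behalf
 * of the lowpan net_device and account it in the device's rx statistics.
 */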
static int lowpan_give_skb_to_device(struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);
	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	return netif_rx(skb);
}

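/* Map a lowpan_rx_result from the dispatch handlers to a NET_RX_* return
 * value.  The skb is freed here for RX_CONTINUE (unknown dispatch) and
 * RX_DROP_UNUSABLE; for RX_DROP the handler has already consumed it.
 */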
static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
{
	switch (res) {
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		fallthrough;
	case RX_DROP_UNUSABLE:
		kfree_skb(skb);

		fallthrough;
	case RX_DROP:
		return NET_RX_DROP;
	case RX_QUEUED:
		return lowpan_give_skb_to_device(skb);
	default:
		break;
	}

	return NET_RX_DROP;
}

static inline bool lowpan_is_frag1(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
}

static inline bool lowpan_is_fragn(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
}

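/* Dispatch handler for FRAG1/FRAGN frames: hand the skb to the 6LoWPAN
 * fragment reassembly code.  lowpan_frag_rcv() returns 1 once a complete
 * datagram has been reassembled into skb, which is then queued to the
 * stack; otherwise the skb was consumed (queued in a frag bucket or
 * freed on error).
 */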
static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
{
	int ret;

	if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
	      lowpan_is_fragn(*skb_network_header(skb))))
		return RX_CONTINUE;

	ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
			      LOWPAN_DISPATCH_FRAG_MASK);
	if (ret == 1)
		return RX_QUEUED;

	/* Packet is freed by lowpan_frag_rcv on error or put into the frag
	 * bucket.
	 */
	return RX_DROP;
}

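/* Decompress an IPHC-compressed header in place, using the 802.15.4
 * source and destination addresses peeked from the MAC header to
 * reconstruct elided address bits.
 */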
int lowpan_iphc_decompress(struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		return -EINVAL;

	return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
}

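/* Dispatch handler for IPHC (RFC 6282) compressed frames arriving
 * outside of fragment reassembly.
 */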
static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	/* Setting the datagram size (d_size) to zero indicates non-fragment
	 * handling while doing lowpan_header_decompress.
	 */
	lowpan_802154_cb(skb)->d_size = 0;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP_UNUSABLE;

	return RX_QUEUED;
}

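/* Dispatch handler for uncompressed IPv6 frames: the payload already
 * carries a plain IPv6 header right after the one-byte dispatch.
 */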
lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
{
	if (!lowpan_is_ipv6(*skb_network_header(skb)))
		return RX_CONTINUE;

	/* Pull off the one-byte 6LoWPAN dispatch header. */
	skb_pull(skb, 1);
	return RX_QUEUED;
}

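/* The handlers below only recognize dispatch types that this stack does
 * not implement (ESC, HC1, DFF, BC0 and MESH); they emit a rate-limited
 * warning and drop the frame as unusable.
 */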
static inline bool lowpan_is_esc(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_ESC;
}

static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
{
	if (!lowpan_is_esc(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN ESC not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_hc1(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_HC1;
}

static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
{
	if (!lowpan_is_hc1(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN HC1 not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_dff(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_DFF;
}

static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
{
	if (!lowpan_is_dff(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN DFF not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_bc0(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_BC0;
}

static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
{
	if (!lowpan_is_bc0(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN BC0 not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_mesh(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
}

static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
{
	if (!lowpan_is_mesh(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN MESH not supported");

	return RX_DROP_UNUSABLE;
}

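/* Run the dispatch handlers in sequence until one of them claims the
 * frame.  IPHC and fragment handling are tried first as they are the
 * most common cases; RX_CONTINUE from every handler means the dispatch
 * value is unknown.
 */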
static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);	\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_frag);
	CALL_RXH(lowpan_rx_h_ipv6);
	CALL_RXH(lowpan_rx_h_esc);
	CALL_RXH(lowpan_rx_h_hc1);
	CALL_RXH(lowpan_rx_h_dff);
	CALL_RXH(lowpan_rx_h_bc0);
	CALL_RXH(lowpan_rx_h_mesh);

rxh_next:
	return lowpan_rx_handlers_result(skb, res);
#undef CALL_RXH
}

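/* NALP: a first byte with the two high bits clear marks the frame as
 * "not a LoWPAN frame" and must not be processed further.
 */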
static inline bool lowpan_is_nalp(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
}

/* Look up the reserved dispatch values at:
 * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
 *
 * Last Updated: 2015-01-22
 */
static inline bool lowpan_is_reserved(u8 dispatch)
{
	return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
		(dispatch >= 0x51 && dispatch <= 0x5F) ||
		(dispatch >= 0xc8 && dispatch <= 0xdf) ||
		dispatch >= 0xe8);
}

/* lowpan_rx_h_check checks generic 6LoWPAN requirements in the MAC and
 * 6LoWPAN headers.
 *
 * Don't manipulate the skb here, it could be a shared buffer.
 */
static inline bool lowpan_rx_h_check(struct sk_buff *skb)
{
	__le16 fc = ieee802154_get_fc_from_skb(skb);

	/* check for an ieee802154-conformant 6LoWPAN header */
	if (!ieee802154_is_data(fc) ||
	    !ieee802154_skb_is_intra_pan_addressing(fc, skb))
		return false;

	/* check if we can dereference the dispatch */
	if (unlikely(!skb->len))
		return false;

	if (lowpan_is_nalp(*skb_network_header(skb)) ||
	    lowpan_is_reserved(*skb_network_header(skb)))
		return false;

	return true;
}

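/* Receive path entry point, called for every ETH_P_IEEE802154 frame.
 * Frames that are not well-formed 6LoWPAN data frames, or that arrive
 * while no lowpan interface is up on top of the wpan device, are
 * dropped.  skb->dev is switched to the lowpan device and the skb is
 * unshared before any handler that manipulates the buffer runs.
 */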
static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
		      struct packet_type *pt, struct net_device *orig_wdev)
{
	struct net_device *ldev;

	if (wdev->type != ARPHRD_IEEE802154 ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    !lowpan_rx_h_check(skb))
		goto drop;

	ldev = wdev->ieee802154_ptr->lowpan_dev;
	if (!ldev || !netif_running(ldev))
		goto drop;

	/* Replacing skb->dev and the rx handlers that follow will manipulate
	 * the skb.
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;
	skb->dev = ldev;

	/* When we receive a FRAG1 frame it is likely that we manipulate the
	 * buffer, and when we receive an IPHC frame we manipulate the data
	 * buffer. So we need to unshare the buffer.
	 */
	if (lowpan_is_frag1(*skb_network_header(skb)) ||
	    lowpan_is_iphc(*skb_network_header(skb))) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			goto out;
	}

	return lowpan_invoke_rx_handlers(skb);

drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

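/* Hook into the core receive path for all IEEE 802.15.4 frames delivered
 * by the MAC layer.
 */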
static struct packet_type lowpan_packet_type = {
	.type = htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};

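/* Register and unregister lowpan_packet_type with the core networking
 * receive path.
 */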
void lowpan_rx_init(void)
{
	dev_add_pack(&lowpan_packet_type);
}

void lowpan_rx_exit(void)
{
	dev_remove_pack(&lowpan_packet_type);
}