/** @file
 * @brief IPv4 related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_ipv4, CONFIG_NET_IPV4_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/virtual.h>
#include <zephyr/net/ethernet.h>
#include "net_private.h"
#include "connection.h"
#include "net_stats.h"
#include "icmpv4.h"
#include "udp_internal.h"
#include "tcp_internal.h"
#include "dhcpv4/dhcpv4_internal.h"
#include "ipv4.h"
#include "pmtu.h"

BUILD_ASSERT(sizeof(struct in_addr) == NET_IPV4_ADDR_SIZE);

/* Timeout for various buffer allocations in this file. */
#define NET_BUF_TIMEOUT K_MSEC(50)

int net_ipv4_create_full(struct net_pkt *pkt,
			 const struct in_addr *src,
			 const struct in_addr *dst,
			 uint8_t tos,
			 uint16_t id,
			 uint8_t flags,
			 uint16_t offset)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ipv4_hdr) {
		return -ENOBUFS;
	}

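	/* Version 4, IHL of 5 32-bit words: a 20-byte header with no options. */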
	ipv4_hdr->vhl = 0x45;
	ipv4_hdr->tos = tos;
	ipv4_hdr->len = 0U;
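	/* The 16-bit identification and fragment offset fields are written a
	 * byte at a time in network byte order; the 3-bit flags live in the
	 * top bits of the first offset byte.
	 */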
	ipv4_hdr->id[0] = id >> 8;
	ipv4_hdr->id[1] = id;
	ipv4_hdr->offset[0] = (offset >> 8) | (flags << 5);
	ipv4_hdr->offset[1] = offset;

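	/* A TTL of zero in the packet metadata means "not set": fall back to
	 * the per-context or per-interface default, using the multicast TTL
	 * for multicast destinations.
	 */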
	ipv4_hdr->ttl = net_pkt_ipv4_ttl(pkt);
	if (ipv4_hdr->ttl == 0U) {
		if (net_ipv4_is_addr_mcast(dst)) {
			if (net_pkt_context(pkt) != NULL) {
				ipv4_hdr->ttl =
					net_context_get_ipv4_mcast_ttl(net_pkt_context(pkt));
			} else {
				ipv4_hdr->ttl = net_if_ipv4_get_mcast_ttl(net_pkt_iface(pkt));
			}
		} else {
			if (net_pkt_context(pkt) != NULL) {
				ipv4_hdr->ttl =
					net_context_get_ipv4_ttl(net_pkt_context(pkt));
			} else {
				ipv4_hdr->ttl = net_if_ipv4_get_ttl(net_pkt_iface(pkt));
			}
		}
	}

	ipv4_hdr->proto = 0U;
	ipv4_hdr->chksum = 0U;

	net_ipv4_addr_copy_raw(ipv4_hdr->dst, (uint8_t *)dst);
	net_ipv4_addr_copy_raw(ipv4_hdr->src, (uint8_t *)src);

	net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv4_hdr));

	return net_pkt_set_data(pkt, &ipv4_access);
}

int net_ipv4_create(struct net_pkt *pkt,
		    const struct in_addr *src,
		    const struct in_addr *dst)
{
	uint8_t tos = 0;
	uint8_t flags = 0U;

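	/* Fold the packet's DSCP and ECN metadata into the TOS byte when
	 * differentiated services support is enabled.
	 */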
	if (IS_ENABLED(CONFIG_NET_IP_DSCP_ECN)) {
		net_ipv4_set_dscp(&tos, net_pkt_ip_dscp(pkt));
		net_ipv4_set_ecn(&tos, net_pkt_ip_ecn(pkt));
	}

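	/* When path MTU discovery is in use for this packet, set the
	 * Don't Fragment flag so that routers report oversized packets
	 * instead of fragmenting them.
	 */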
	if (IS_ENABLED(CONFIG_NET_IPV4_PMTU) && net_pkt_ipv4_pmtu(pkt)) {
		flags = NET_IPV4_DF;
	}

	return net_ipv4_create_full(pkt, src, dst, tos, 0U, flags, 0U);
}

int net_ipv4_finalize(struct net_pkt *pkt, uint8_t next_header_proto)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;

	net_pkt_set_overwrite(pkt, true);

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ipv4_hdr) {
		return -ENOBUFS;
	}

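	/* If IPv4 options were appended after the base header, update the
	 * IHL field to cover header plus options (in 32-bit words).
	 */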
	if (IS_ENABLED(CONFIG_NET_IPV4_HDR_OPTIONS)) {
		if (net_pkt_ipv4_opts_len(pkt)) {
			ipv4_hdr->vhl = 0x40 | (0x0F &
					((net_pkt_ip_hdr_len(pkt) +
					  net_pkt_ipv4_opts_len(pkt)) / 4U));
		}
	}

	ipv4_hdr->len = htons(net_pkt_get_len(pkt));
	ipv4_hdr->proto = next_header_proto;

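	/* Fill in the header checksum only when it is not offloaded to the
	 * network interface or hardware.
	 */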
	if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt), NET_IF_CHECKSUM_IPV4_HEADER)) {
		ipv4_hdr->chksum = net_calc_chksum_ipv4(pkt);
	}

	net_pkt_set_data(pkt, &ipv4_access);
	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_IP);

	if (IS_ENABLED(CONFIG_NET_UDP) &&
	    next_header_proto == IPPROTO_UDP) {
		return net_udp_finalize(pkt, false);
	} else if (IS_ENABLED(CONFIG_NET_TCP) &&
		   next_header_proto == IPPROTO_TCP) {
		return net_tcp_finalize(pkt, false);
	} else if (next_header_proto == IPPROTO_ICMP) {
		return net_icmpv4_finalize(pkt, false);
	}

	return 0;
}

#if defined(CONFIG_NET_IPV4_HDR_OPTIONS)
int net_ipv4_parse_hdr_options(struct net_pkt *pkt,
			       net_ipv4_parse_hdr_options_cb_t cb,
			       void *user_data)
{
	struct net_pkt_cursor cur;
	uint8_t opt_data[NET_IPV4_HDR_OPTNS_MAX_LEN];
	uint8_t total_opts_len;

	if (!cb) {
		return -EINVAL;
	}

	net_pkt_cursor_backup(pkt, &cur);
	net_pkt_cursor_init(pkt);

	if (net_pkt_skip(pkt, sizeof(struct net_ipv4_hdr))) {
		return -EINVAL;
	}

	total_opts_len = net_pkt_ipv4_opts_len(pkt);

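	/* Each option starts with a type byte. Except for the single-byte
	 * NOP and End of Options types, it is followed by a length byte
	 * that counts the type, length and data bytes.
	 */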
	while (total_opts_len) {
		uint8_t opt_len = 0U;
		uint8_t opt_type;

		if (net_pkt_read_u8(pkt, &opt_type)) {
			return -EINVAL;
		}

		total_opts_len--;

		if (!(opt_type == NET_IPV4_OPTS_EO ||
		      opt_type == NET_IPV4_OPTS_NOP)) {
			if (net_pkt_read_u8(pkt, &opt_len)) {
				return -EINVAL;
			}

			if (opt_len < 2U || total_opts_len < 1U) {
				return -EINVAL;
			}

			opt_len -= 2U;
			total_opts_len--;
		}

		if (opt_len > total_opts_len) {
			return -EINVAL;
		}

		switch (opt_type) {
		case NET_IPV4_OPTS_NOP:
			break;

		case NET_IPV4_OPTS_EO:
			/* The remaining options length should be zero when
			 * the cursor reaches the End of Options marker.
			 */
			if (total_opts_len) {
				return -EINVAL;
			}

			break;
		case NET_IPV4_OPTS_RR:
		case NET_IPV4_OPTS_TS:
			if (net_pkt_read(pkt, opt_data, opt_len)) {
				return -EINVAL;
			}

			if (cb(opt_type, opt_data, opt_len, user_data)) {
				return -EINVAL;
			}

			break;
		default:
			if (net_pkt_skip(pkt, opt_len)) {
				return -EINVAL;
			}

			break;
		}

		total_opts_len -= opt_len;
	}

	net_pkt_cursor_restore(pkt, &cur);

	return 0;
}
#endif

enum net_verdict net_ipv4_input(struct net_pkt *pkt, bool is_loopback)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
	NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
	int real_len = net_pkt_get_len(pkt);
	enum net_verdict verdict = NET_DROP;
	union net_proto_header proto_hdr;
	struct net_ipv4_hdr *hdr;
	union net_ip_header ip;
	uint8_t hdr_len;
	uint8_t opts_len;
	int pkt_len;

#if defined(CONFIG_NET_L2_IPIP)
	struct net_pkt_cursor hdr_start;

	net_pkt_cursor_backup(pkt, &hdr_start);
#endif

	net_stats_update_ipv4_recv(net_pkt_iface(pkt));

	hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!hdr) {
		NET_DBG("DROP: no buffer");
		goto drop;
	}

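	/* IHL is expressed in 32-bit words; anything shorter than the
	 * 20-byte base header is malformed.
	 */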
	hdr_len = (hdr->vhl & NET_IPV4_IHL_MASK) * 4U;
	if (hdr_len < sizeof(struct net_ipv4_hdr)) {
		NET_DBG("DROP: Invalid hdr length");
		goto drop;
	}

	net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv4_hdr));

	if (IS_ENABLED(CONFIG_NET_IP_DSCP_ECN)) {
		net_pkt_set_ip_dscp(pkt, net_ipv4_get_dscp(hdr->tos));
		net_pkt_set_ip_ecn(pkt, net_ipv4_get_ecn(hdr->tos));
	}

	opts_len = hdr_len - sizeof(struct net_ipv4_hdr);
	if (opts_len > NET_IPV4_HDR_OPTNS_MAX_LEN) {
		NET_DBG("DROP: invalid options length");
		goto drop;
	}

	if (hdr->ttl == 0) {
		goto drop;
	}

	net_pkt_set_ipv4_opts_len(pkt, opts_len);

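	/* The total length from the header is authoritative: drop truncated
	 * packets and trim any link-layer padding (e.g. Ethernet minimum
	 * frame size) off the end.
	 */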
	pkt_len = ntohs(hdr->len);
	if (real_len < pkt_len) {
		NET_DBG("DROP: pkt len per hdr %d != pkt real len %d",
			pkt_len, real_len);
		goto drop;
	} else if (real_len > pkt_len) {
		net_pkt_update_length(pkt, pkt_len);
	}

	if (!is_loopback) {
		if (net_ipv4_is_addr_loopback_raw(hdr->dst) ||
		    net_ipv4_is_addr_loopback_raw(hdr->src)) {
			NET_DBG("DROP: localhost packet");
			goto drop;
		}

		if (net_ipv4_is_my_addr_raw(hdr->src)) {
			NET_DBG("DROP: src addr is %s", "mine");
			goto drop;
		}
	}

	if (net_ipv4_is_addr_mcast_raw(hdr->src)) {
		NET_DBG("DROP: src addr is %s", "mcast");
		goto drop;
	}

	if (net_ipv4_is_addr_bcast_raw(net_pkt_iface(pkt), hdr->src)) {
		NET_DBG("DROP: src addr is %s", "bcast");
		goto drop;
	}

	if (net_ipv4_is_addr_unspecified_raw(hdr->src) &&
	    !net_ipv4_is_addr_bcast_raw(net_pkt_iface(pkt), hdr->dst) &&
	    (hdr->proto != IPPROTO_IGMP)) {
		NET_DBG("DROP: src addr is %s", "unspecified");
		goto drop;
	}

	if (net_if_need_calc_rx_checksum(net_pkt_iface(pkt), NET_IF_CHECKSUM_IPV4_HEADER) &&
	    net_calc_chksum_ipv4(pkt) != 0U) {
		NET_DBG("DROP: invalid chksum");
		goto drop;
	}

	net_pkt_set_ipv4_ttl(pkt, hdr->ttl);

	net_pkt_set_family(pkt, PF_INET);

	if (!net_pkt_filter_ip_recv_ok(pkt)) {
		/* drop the packet */
		net_stats_update_filter_rx_ipv4_drop(net_pkt_iface(pkt));
		return NET_DROP;
	}

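	/* Accept packets addressed to one of our unicast addresses or to a
	 * multicast group. UDP is additionally accepted for the limited
	 * broadcast address, optionally for the non-standard 0.0.0.0
	 * broadcast, and for unicast DHCPv4 replies while address
	 * configuration is still in progress. Broadcast TCP is never
	 * accepted.
	 */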
	if ((!net_ipv4_is_my_addr_raw(hdr->dst) &&
	     !net_ipv4_is_addr_mcast_raw(hdr->dst) &&
	     !(hdr->proto == IPPROTO_UDP &&
	       (net_ipv4_addr_cmp_raw(hdr->dst, net_ipv4_broadcast_address()->s4_addr) ||
		/* RFC 1122 ch. 3.3.6: 0.0.0.0 is a non-standard broadcast address */
		(IS_ENABLED(CONFIG_NET_IPV4_ACCEPT_ZERO_BROADCAST) &&
		 net_ipv4_addr_cmp_raw(hdr->dst,
				       net_ipv4_unspecified_address()->s4_addr)) ||
		net_dhcpv4_accept_unicast(pkt)))) ||
	    (hdr->proto == IPPROTO_TCP &&
	     net_ipv4_is_addr_bcast_raw(net_pkt_iface(pkt), hdr->dst))) {
		NET_DBG("DROP: not for me");
		goto drop;
	}

	net_pkt_acknowledge_data(pkt, &ipv4_access);

	if (opts_len) {
		/* Only a few options are handled (e.g. in Echo Request);
		 * the rest are skipped here.
		 */
		if (net_pkt_skip(pkt, opts_len)) {
			NET_DBG("Header too big? %u", hdr_len);
			goto drop;
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT)) {
		/* Check if this is a fragmented packet, and if so, handle reassembly */
		if ((ntohs(*((uint16_t *)&hdr->offset[0])) &
		     (NET_IPV4_FRAGH_OFFSET_MASK | NET_IPV4_MORE_FRAG_MASK)) != 0) {
			return net_ipv4_handle_fragment_hdr(pkt, hdr);
		}
	}

	NET_DBG("IPv4 packet received from %s to %s",
		net_sprint_ipv4_addr(&hdr->src),
		net_sprint_ipv4_addr(&hdr->dst));

	ip.ipv4 = hdr;

	if (IS_ENABLED(CONFIG_NET_SOCKETS_INET_RAW)) {
		if (net_conn_raw_ip_input(pkt, &ip, hdr->proto) == NET_DROP) {
			goto drop;
		}
	}

	switch (hdr->proto) {
	case IPPROTO_ICMP:
		verdict = net_icmpv4_input(pkt, hdr);
		if (verdict == NET_DROP) {
			goto drop;
		}
		return verdict;
#if defined(CONFIG_NET_IPV4_IGMP)
	case IPPROTO_IGMP:
		verdict = net_ipv4_igmp_input(pkt, hdr);
		if (verdict == NET_DROP) {
			goto drop;
		}
		return verdict;
#endif
	case IPPROTO_TCP:
		proto_hdr.tcp = net_tcp_input(pkt, &tcp_access);
		if (proto_hdr.tcp) {
			verdict = NET_OK;
		}
		break;
	case IPPROTO_UDP:
		proto_hdr.udp = net_udp_input(pkt, &udp_access);
		if (proto_hdr.udp) {
			verdict = NET_OK;
		}
		break;

#if defined(CONFIG_NET_L2_IPIP)
	case IPPROTO_IPV6:
	case IPPROTO_IPIP: {
		struct sockaddr_in remote_addr = { 0 };
		struct net_if *tunnel_iface;

		remote_addr.sin_family = AF_INET;
		net_ipv4_addr_copy_raw((uint8_t *)&remote_addr.sin_addr, hdr->src);

		net_pkt_set_remote_address(pkt, (struct sockaddr *)&remote_addr,
					   sizeof(struct sockaddr_in));

		/* Get rid of the old IP header */
		net_pkt_cursor_restore(pkt, &hdr_start);
		net_pkt_pull(pkt, net_pkt_ip_hdr_len(pkt) +
				  net_pkt_ipv4_opts_len(pkt));

		tunnel_iface = net_ipip_get_virtual_interface(net_pkt_iface(pkt));
		if (tunnel_iface != NULL && net_if_l2(tunnel_iface)->recv != NULL) {
			return net_if_l2(tunnel_iface)->recv(net_pkt_iface(pkt), pkt);
		}
	}
#endif
	}

	if (verdict == NET_DROP) {
		goto drop;
	}

	verdict = net_conn_input(pkt, &ip, hdr->proto, &proto_hdr);
	if (verdict != NET_DROP) {
		return verdict;
	}

drop:
	net_stats_update_ipv4_drop(net_pkt_iface(pkt));
	return NET_DROP;
}

enum net_verdict net_ipv4_prepare_for_send(struct net_pkt *pkt)
{
	if (IS_ENABLED(CONFIG_NET_IPV4_PMTU)) {
		struct net_pmtu_entry *entry;
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
		};
		int ret;

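		/* If there is no cached PMTU estimate for this destination
		 * yet, seed one with the MTU of the outgoing interface.
		 */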
		net_ipv4_addr_copy_raw((uint8_t *)&dst.sin_addr,
				       NET_IPV4_HDR(pkt)->dst);
		entry = net_pmtu_get_entry((struct sockaddr *)&dst);
		if (entry == NULL) {
			ret = net_pmtu_update_mtu((struct sockaddr *)&dst,
						  net_if_get_mtu(net_pkt_iface(pkt)));
			if (ret < 0) {
				NET_DBG("Cannot update PMTU for %s (%d)",
					net_sprint_ipv4_addr(&dst.sin_addr),
					ret);
			}
		}
	}

#if defined(CONFIG_NET_IPV4_FRAGMENT)
	return net_ipv4_prepare_for_send_fragment(pkt);
#else
	return NET_OK;
#endif
}

void net_ipv4_init(void)
{
	if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT)) {
		net_ipv4_setup_fragment_buffers();
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_ACD)) {
		net_ipv4_acd_init();
	}
}