/** @file
 * @brief ICMPv4 related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_icmpv4, CONFIG_NET_ICMPV4_LOG_LEVEL);

#include <errno.h>
#include <zephyr/sys/slist.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/icmp.h>
#include "net_private.h"
#include "ipv4.h"
#include "icmpv4.h"
#include "net_stats.h"
#include "pmtu.h"

#define PKT_WAIT_TIME K_SECONDS(1)

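/* User data passed to the IPv4 header options parser callback below:
 * the reply packet being built and the source address selected for it.
 */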
struct net_icmpv4_hdr_opts_data {
	struct net_pkt *reply;
	const struct in_addr *src;
};

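/* Write the ICMPv4 header (type, code and a zeroed checksum) at the current
 * packet cursor position. The checksum is filled in later by
 * net_icmpv4_finalize().
 */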
int net_icmpv4_create(struct net_pkt *pkt, uint8_t icmp_type, uint8_t icmp_code)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(icmpv4_access,
					      struct net_icmp_hdr);
	struct net_icmp_hdr *icmp_hdr;

	icmp_hdr = (struct net_icmp_hdr *)net_pkt_get_data(pkt, &icmpv4_access);
	if (!icmp_hdr) {
		return -ENOBUFS;
	}

	icmp_hdr->type = icmp_type;
	icmp_hdr->code = icmp_code;
	icmp_hdr->chksum = 0U;

	return net_pkt_set_data(pkt, &icmpv4_access);
}

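/* Compute the ICMPv4 checksum once the payload is in place, skipping over any
 * IPv4 options, and commit the header. The checksum is only calculated when
 * the interface needs it done in software or when force_chksum is set.
 *
 * Rough usage sketch, following the echo reply path later in this file:
 *
 *	net_ipv4_create(reply, src, dst);
 *	net_icmpv4_create(reply, NET_ICMPV4_ECHO_REPLY, 0);
 *	net_pkt_copy(reply, pkt, payload_len);
 *	net_pkt_cursor_init(reply);
 *	net_ipv4_finalize(reply, IPPROTO_ICMP);
 */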
int net_icmpv4_finalize(struct net_pkt *pkt, bool force_chksum)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(icmpv4_access,
					      struct net_icmp_hdr);
	struct net_icmp_hdr *icmp_hdr;

	if (IS_ENABLED(CONFIG_NET_IPV4_HDR_OPTIONS)) {
		if (net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
			return -ENOBUFS;
		}
	}

	icmp_hdr = (struct net_icmp_hdr *)net_pkt_get_data(pkt, &icmpv4_access);
	if (!icmp_hdr) {
		return -ENOBUFS;
	}

	icmp_hdr->chksum = 0U;
	if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt), NET_IF_CHECKSUM_IPV4_ICMP) ||
	    force_chksum) {
		icmp_hdr->chksum = net_calc_chksum_icmpv4(pkt);
		net_pkt_set_chksum_done(pkt, true);
	}

	return net_pkt_set_data(pkt, &icmpv4_access);
}

#if defined(CONFIG_NET_IPV4_HDR_OPTIONS)

/* Parse the Record Route option and add our own IP address to it if there
 * is a free entry.
 */
static int icmpv4_update_record_route(uint8_t *opt_data,
				      uint8_t opt_len,
				      struct net_pkt *reply,
				      const struct in_addr *src)
{
	uint8_t len = net_pkt_ipv4_opts_len(reply);
	uint8_t addr_len = sizeof(struct in_addr);
	uint8_t ptr_offset = 4U;
	uint8_t offset = 0U;
	uint8_t skip;
	uint8_t ptr;

	if (net_pkt_write_u8(reply, NET_IPV4_OPTS_RR)) {
		goto drop;
	}

	len++;

	if (net_pkt_write_u8(reply, opt_len + 2U)) {
		goto drop;
	}

	len++;

	/* The third octet is the pointer into the route data
	 * indicating the octet which begins the next area to
	 * store a route address. The pointer is relative to
	 * this option, and the smallest legal value for the
	 * pointer is 4.
	 */
	ptr = opt_data[offset++];

	/* If the route data area is already full (the pointer exceeds
	 * the length) the datagram is forwarded without inserting the
	 * address into the recorded route.
	 */
	if (ptr >= opt_len) {
		/* No free entry to update RecordRoute */
		if (net_pkt_write_u8(reply, ptr)) {
			goto drop;
		}

		len++;

		if (net_pkt_write(reply, opt_data + offset, opt_len)) {
			goto drop;
		}

		len += opt_len;

		net_pkt_set_ipv4_opts_len(reply, len);

		return 0;
	}

	/* If there is some room but not enough room for a full address
	 * to be inserted, the original datagram is considered to be in
	 * error and is discarded.
	 */
	if ((ptr + addr_len) > opt_len) {
		goto drop;
	}

	/* So, there is a free entry to update Record Route */
	if (net_pkt_write_u8(reply, ptr + addr_len)) {
		goto drop;
	}

	len++;

	skip = ptr - ptr_offset;
	if (skip) {
		/* Do not alter existing route entries */
		if (net_pkt_write(reply, opt_data + offset, skip)) {
			goto drop;
		}

		offset += skip;
		len += skip;
	}

	if (net_pkt_write(reply, (void *)src, addr_len)) {
		goto drop;
	}

	len += addr_len;
	offset += addr_len;

	if (opt_len > offset) {
		if (net_pkt_write(reply, opt_data + offset, opt_len - offset)) {
			goto drop;
		}
	}

	len += opt_len - offset;

	net_pkt_set_ipv4_opts_len(reply, len);

	return 0;

drop:
	return -EINVAL;
}

/* TODO: The timestamp value should be updated as per RFC 791
 * (Internet Timestamp): a 32-bit timestamp in milliseconds
 * since midnight UT.
 */
static int icmpv4_update_time_stamp(uint8_t *opt_data,
				    uint8_t opt_len,
				    struct net_pkt *reply,
				    const struct in_addr *src)
{
	uint8_t len = net_pkt_ipv4_opts_len(reply);
	uint8_t addr_len = sizeof(struct in_addr);
	uint8_t ptr_offset = 5U;
	uint8_t offset = 0U;
	uint8_t new_entry_len;
	uint8_t overflow;
	uint8_t flag;
	uint8_t skip;
	uint8_t ptr;

	if (net_pkt_write_u8(reply, NET_IPV4_OPTS_TS)) {
		goto drop;
	}

	len++;

	if (net_pkt_write_u8(reply, opt_len + 2U)) {
		goto drop;
	}

	len++;

	/* The Pointer is the number of octets from the beginning of
	 * this option to the end of timestamps plus one (i.e., it
	 * points to the octet beginning the space for next timestamp).
	 * The smallest legal value is 5. The timestamp area is full
	 * when the pointer is greater than the length.
	 */
	ptr = opt_data[offset++];
	flag = opt_data[offset++];

	overflow = (flag & 0xF0) >> 4U;
	flag = flag & 0x0F;

	/* If the timestamp data area is already full (the pointer
	 * exceeds the length) the datagram is forwarded without
	 * inserting the timestamp, but the overflow count is
	 * incremented by one.
	 */
	if (ptr >= opt_len) {
		/* If the overflow count itself overflows, the original
		 * datagram is considered to be in error and is discarded.
		 */
		if (overflow == 0x0F) {
			goto drop;
		}

		/* No free entry to update Timestamp data */
		if (net_pkt_write_u8(reply, ptr)) {
			goto drop;
		}

		len++;

		overflow++;
		flag = (overflow << 4U) | flag;

		if (net_pkt_write_u8(reply, flag)) {
			goto drop;
		}

		len++;

		if (net_pkt_write(reply, opt_data + offset, opt_len)) {
			goto drop;
		}

		len += opt_len;

		net_pkt_set_ipv4_opts_len(reply, len);

		return 0;
	}

	switch (flag) {
	case NET_IPV4_TS_OPT_TS_ONLY:
		new_entry_len = sizeof(uint32_t);
		break;
	case NET_IPV4_TS_OPT_TS_ADDR:
		new_entry_len = addr_len + sizeof(uint32_t);
		break;
	case NET_IPV4_TS_OPT_TS_PRES: /* TODO */
	default:
		goto drop;
	}

	/* So, there is a free entry to update Timestamp */
	if (net_pkt_write_u8(reply, ptr + new_entry_len)) {
		goto drop;
	}

	len++;

	if (net_pkt_write_u8(reply, (overflow << 4) | flag)) {
		goto drop;
	}

	len++;

	skip = ptr - ptr_offset;
	if (skip) {
		/* Do not alter existing entries */
		if (net_pkt_write(reply, opt_data + offset, skip)) {
			goto drop;
		}

		len += skip;
		offset += skip;
	}

	switch (flag) {
	case NET_IPV4_TS_OPT_TS_ONLY:
		if (net_pkt_write_be32(reply, htons(k_uptime_get_32()))) {
			goto drop;
		}

		len += sizeof(uint32_t);

		offset += sizeof(uint32_t);

		break;
	case NET_IPV4_TS_OPT_TS_ADDR:
		if (net_pkt_write(reply, (void *)src, addr_len)) {
			goto drop;
		}

		len += addr_len;

		if (net_pkt_write_be32(reply, htons(k_uptime_get_32()))) {
			goto drop;
		}

		len += sizeof(uint32_t);

		offset += (addr_len + sizeof(uint32_t));

		break;
	}

	if (opt_len > offset) {
		if (net_pkt_write(reply, opt_data + offset, opt_len - offset)) {
			goto drop;
		}
	}

	len += opt_len - offset;

	net_pkt_set_ipv4_opts_len(reply, len);

	return 0;

drop:
	return -EINVAL;
}

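/* Callback invoked by net_ipv4_parse_hdr_options() for every option found in
 * the incoming header: Record Route and Timestamp options are mirrored into
 * the reply, other options are ignored.
 */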
static int icmpv4_reply_to_options(uint8_t opt_type,
				   uint8_t *opt_data,
				   uint8_t opt_len,
				   void *user_data)
{
	struct net_icmpv4_hdr_opts_data *ud =
		(struct net_icmpv4_hdr_opts_data *)user_data;

	if (opt_type == NET_IPV4_OPTS_RR) {
		return icmpv4_update_record_route(opt_data, opt_len,
						  ud->reply, ud->src);
	} else if (opt_type == NET_IPV4_OPTS_TS) {
		return icmpv4_update_time_stamp(opt_data, opt_len,
						ud->reply, ud->src);
	}

	return 0;
}

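/* Rebuild the IPv4 header options of the echo request in the reply packet
 * and pad the options area with NOPs so that it ends on a 32-bit boundary.
 */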
static int icmpv4_handle_header_options(struct net_pkt *pkt,
					struct net_pkt *reply,
					const struct in_addr *src)
{
	struct net_icmpv4_hdr_opts_data ud;
	uint8_t len;

	ud.reply = reply;
	ud.src = src;

	if (net_ipv4_parse_hdr_options(pkt, icmpv4_reply_to_options, &ud)) {
		return -EINVAL;
	}

	len = net_pkt_ipv4_opts_len(reply);

	/* The IPv4 options part must end on a 32-bit boundary */
	if (len % 4U != 0U) {
		uint8_t i = 4U - (len % 4U);

		if (net_pkt_memset(reply, NET_IPV4_OPTS_NOP, i)) {
			return -EINVAL;
		}

		len += i;
	}

	/* Options are added now, update the header length. */
	net_pkt_set_ipv4_opts_len(reply, len);

	return 0;
}
#else
static int icmpv4_handle_header_options(struct net_pkt *pkt,
					struct net_pkt *reply,
					const struct in_addr *src)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(reply);
	ARG_UNUSED(src);

	return 0;
}
#endif

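/* Handler for incoming ICMPv4 Echo Request packets, registered with the ICMP
 * core in net_icmpv4_init(). It allocates a reply packet, mirrors the IPv4
 * header options if enabled, copies the original payload and sends the Echo
 * Reply back to the requester.
 */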
static int icmpv4_handle_echo_request(struct net_icmp_ctx *ctx,
				      struct net_pkt *pkt,
				      struct net_icmp_ip_hdr *hdr,
				      struct net_icmp_hdr *icmp_hdr,
				      void *user_data)
{
	struct net_pkt *reply = NULL;
	struct net_ipv4_hdr *ip_hdr = hdr->ipv4;
	struct in_addr req_src, req_dst;
	const struct in_addr *src;
	int16_t payload_len;

	net_ipv4_addr_copy_raw(req_src.s4_addr, ip_hdr->src);
	net_ipv4_addr_copy_raw(req_dst.s4_addr, ip_hdr->dst);

	/* If the source address of the echo request is unspecified, the
	 * reply would have no usable destination, so drop the packet.
	 */
	if (net_ipv4_is_addr_unspecified(&req_src)) {
		NET_DBG("DROP: src addr is unspecified");
		goto drop;
	}

	NET_DBG("Received Echo Request from %s to %s",
		net_sprint_ipv4_addr(&req_src),
		net_sprint_ipv4_addr(&req_dst));

	payload_len = net_pkt_get_len(pkt) -
		      net_pkt_ip_hdr_len(pkt) -
		      net_pkt_ipv4_opts_len(pkt) - NET_ICMPH_LEN;
	if (payload_len < NET_ICMPV4_UNUSED_LEN) {
		/* No identifier or sequence number present */
		goto drop;
	}

	reply = net_pkt_alloc_with_buffer(net_pkt_iface(pkt),
					  net_pkt_ipv4_opts_len(pkt) +
					  payload_len,
					  AF_INET, IPPROTO_ICMP,
					  PKT_WAIT_TIME);
	if (!reply) {
		NET_DBG("DROP: No buffer");
		goto drop;
	}

	if (net_ipv4_is_addr_mcast(&req_dst) ||
	    net_ipv4_is_addr_bcast(net_pkt_iface(pkt), &req_dst)) {
		src = net_if_ipv4_select_src_addr(net_pkt_iface(pkt), &req_src);

		if (net_ipv4_is_addr_unspecified(src)) {
			NET_DBG("DROP: No src address match");
			goto drop;
		}
	} else {
		src = &req_dst;
	}

	net_pkt_set_ip_dscp(reply, net_pkt_ip_dscp(pkt));
	net_pkt_set_ip_ecn(reply, net_pkt_ip_ecn(pkt));

	if (net_ipv4_create(reply, src, &req_src)) {
		goto drop;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_HDR_OPTIONS)) {
		if (net_pkt_ipv4_opts_len(pkt) &&
		    icmpv4_handle_header_options(pkt, reply, src)) {
			goto drop;
		}
	}

	if (net_icmpv4_create(reply, NET_ICMPV4_ECHO_REPLY, 0) ||
	    net_pkt_copy(reply, pkt, payload_len)) {
		goto drop;
	}

	net_pkt_cursor_init(reply);
	net_ipv4_finalize(reply, IPPROTO_ICMP);

	NET_DBG("Sending Echo Reply from %s to %s",
		net_sprint_ipv4_addr(src),
		net_sprint_ipv4_addr(&req_src));

	if (net_try_send_data(reply, K_NO_WAIT) < 0) {
		goto drop;
	}

	net_stats_update_icmp_sent(net_pkt_iface(reply));

	return 0;
drop:
	if (reply) {
		net_pkt_unref(reply);
	}

	net_stats_update_icmp_drop(net_pkt_iface(pkt));

	return -EIO;
}

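/* Build and send an ICMPv4 error message of the given type and code in
 * response to the offending packet. The checks below suppress errors for
 * ICMP traffic and for packets that were sent to a broadcast address; the
 * error payload carries the original IPv4 header plus the UDP or TCP header
 * when one is present.
 */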
int net_icmpv4_send_error(struct net_pkt *orig, uint8_t type, uint8_t code)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	int err = -EIO;
	struct net_ipv4_hdr *ip_hdr;
	struct in_addr orig_src, orig_dst;
	struct net_pkt *pkt;
	size_t copy_len;

	net_pkt_cursor_init(orig);

	ip_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(orig, &ipv4_access);
	if (!ip_hdr) {
		goto drop_no_pkt;
	}

	if (ip_hdr->proto == IPPROTO_ICMP) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(icmpv4_access,
						      struct net_icmp_hdr);
		struct net_icmp_hdr *icmp_hdr;

		icmp_hdr = (struct net_icmp_hdr *)net_pkt_get_data(
			orig, &icmpv4_access);
		if (!icmp_hdr || icmp_hdr->code < 8) {
			/* We must not send ICMP errors back */
			err = -EINVAL;
			goto drop_no_pkt;
		}
	}

	net_ipv4_addr_copy_raw(orig_src.s4_addr, ip_hdr->src);
	net_ipv4_addr_copy_raw(orig_dst.s4_addr, ip_hdr->dst);

	if (net_ipv4_is_addr_bcast(net_pkt_iface(orig), &orig_dst)) {
		/* We should not send an error for a packet that was
		 * sent to a broadcast address.
		 */
		NET_DBG("Not sending error to bcast pkt from %s on proto %s",
			net_sprint_ipv4_addr(&orig_src),
			net_proto2str(AF_INET, ip_hdr->proto));
		goto drop_no_pkt;
	}

	if (ip_hdr->proto == IPPROTO_UDP) {
		copy_len = sizeof(struct net_ipv4_hdr) +
			   sizeof(struct net_udp_hdr);
	} else if (ip_hdr->proto == IPPROTO_TCP) {
		copy_len = sizeof(struct net_ipv4_hdr) +
			   sizeof(struct net_tcp_hdr);
	} else {
		copy_len = 0;
	}

	pkt = net_pkt_alloc_with_buffer(net_pkt_iface(orig),
					copy_len + NET_ICMPV4_UNUSED_LEN,
					AF_INET, IPPROTO_ICMP,
					PKT_WAIT_TIME);
	if (!pkt) {
		err = -ENOMEM;
		goto drop_no_pkt;
	}

	if (net_ipv4_create(pkt, &orig_dst, &orig_src) ||
	    net_icmpv4_create(pkt, type, code) ||
	    net_pkt_memset(pkt, 0, NET_ICMPV4_UNUSED_LEN) ||
	    net_pkt_copy(pkt, orig, copy_len)) {
		goto drop;
	}

	net_pkt_cursor_init(pkt);
	net_ipv4_finalize(pkt, IPPROTO_ICMP);

	net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			 net_pkt_lladdr_src(orig)->addr,
			 net_pkt_lladdr_src(orig)->len);

	NET_DBG("Sending ICMPv4 Error Message type %d code %d from %s to %s",
		type, code,
		net_sprint_ipv4_addr(&orig_dst),
		net_sprint_ipv4_addr(&orig_src));

	if (net_try_send_data(pkt, K_NO_WAIT) >= 0) {
		net_stats_update_icmp_sent(net_pkt_iface(orig));
		return 0;
	}

drop:
	net_pkt_unref(pkt);

drop_no_pkt:
	net_stats_update_icmp_drop(net_pkt_iface(orig));

	return err;
}

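/* Entry point for received ICMPv4 packets: verify the checksum when needed,
 * filter out broadcast packets unless broadcast echo requests are allowed,
 * update statistics and dispatch the packet to the registered handlers.
 */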
enum net_verdict net_icmpv4_input(struct net_pkt *pkt,
				  struct net_ipv4_hdr *ip_hdr)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(icmp_access,
					      struct net_icmp_hdr);
	struct net_icmp_hdr *icmp_hdr;
	int ret;

	icmp_hdr = (struct net_icmp_hdr *)net_pkt_get_data(pkt, &icmp_access);
	if (!icmp_hdr) {
		NET_DBG("DROP: NULL ICMPv4 header");
		return NET_DROP;
	}

	if (net_if_need_calc_rx_checksum(net_pkt_iface(pkt), NET_IF_CHECKSUM_IPV4_ICMP) ||
	    net_pkt_is_ip_reassembled(pkt)) {
		if (net_calc_chksum_icmpv4(pkt) != 0U) {
			NET_DBG("DROP: Invalid checksum");
			goto drop;
		}
	}

	if (net_ipv4_is_addr_bcast_raw(net_pkt_iface(pkt), ip_hdr->dst) &&
	    (!IS_ENABLED(CONFIG_NET_ICMPV4_ACCEPT_BROADCAST) ||
	     icmp_hdr->type != NET_ICMPV4_ECHO_REQUEST)) {
		NET_DBG("DROP: broadcast pkt");
		goto drop;
	}

	net_pkt_acknowledge_data(pkt, &icmp_access);

	NET_DBG("ICMPv4 packet received type %d code %d",
		icmp_hdr->type, icmp_hdr->code);

	net_stats_update_icmp_recv(net_pkt_iface(pkt));

	ret = net_icmp_call_ipv4_handlers(pkt, ip_hdr, icmp_hdr);
	if (ret < 0 && ret != -ENOENT) {
		NET_ERR("ICMPv4 handling failure (%d)", ret);
	}

	net_pkt_unref(pkt);

	return NET_OK;

drop:
	net_stats_update_icmp_drop(net_pkt_iface(pkt));

	return NET_DROP;
}

#if defined(CONFIG_NET_IPV4_PMTU)
/* RFC 1191 chapter 3 says the minimum MTU is 68 octets. That is far too
 * small for the modern world, so use 576 octets as the minimum.
 */
#define MIN_IPV4_MTU NET_IPV4_MTU

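/* Handler for ICMPv4 Destination Unreachable messages, used for IPv4 Path
 * MTU discovery: validate the message, extract the advertised MTU and update
 * the PMTU entry of the peer if the new value is acceptable.
 */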
static int icmpv4_handle_dst_unreach(struct net_icmp_ctx *ctx,
				     struct net_pkt *pkt,
				     struct net_icmp_ip_hdr *hdr,
				     struct net_icmp_hdr *icmp_hdr,
				     void *user_data)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(dst_unreach_access,
					      struct net_icmpv4_dest_unreach);
	struct net_icmpv4_dest_unreach *dest_unreach_hdr;
	struct net_ipv4_hdr *ip_hdr = hdr->ipv4;
	uint16_t length = net_pkt_get_len(pkt);
	struct net_pmtu_entry *entry;
	struct sockaddr_in sockaddr_src = {
		.sin_family = AF_INET,
	};
	uint16_t mtu;
	int ret;

	ARG_UNUSED(user_data);

	dest_unreach_hdr = (struct net_icmpv4_dest_unreach *)
			   net_pkt_get_data(pkt, &dst_unreach_access);
	if (dest_unreach_hdr == NULL) {
		NET_DBG("DROP: NULL ICMPv4 Destination Unreachable header");
		goto drop;
	}

	net_stats_update_ipv4_pmtu_recv(net_pkt_iface(pkt));

	NET_DBG("Received Destination Unreachable from %s to %s",
		net_sprint_ipv4_addr(&ip_hdr->src),
		net_sprint_ipv4_addr(&ip_hdr->dst));

	if (length < (sizeof(struct net_ipv4_hdr) +
		      sizeof(struct net_icmp_hdr) +
		      sizeof(struct net_icmpv4_dest_unreach))) {
704 NET_DBG("DROP: length %d too big %zd",
705 length, sizeof(struct net_ipv4_hdr) +
706 sizeof(struct net_icmp_hdr) +
707 sizeof(struct net_icmpv4_dest_unreach));
708 goto drop;
709 }

	net_pkt_acknowledge_data(pkt, &dst_unreach_access);

	mtu = ntohs(dest_unreach_hdr->mtu);

	if (mtu < MIN_IPV4_MTU) {
		NET_DBG("DROP: Unsupported MTU %u, min is %u",
			mtu, MIN_IPV4_MTU);
		goto drop;
	}

	net_ipaddr_copy(&sockaddr_src.sin_addr, (struct in_addr *)&ip_hdr->src);

	entry = net_pmtu_get_entry((struct sockaddr *)&sockaddr_src);
	if (entry == NULL) {
		NET_DBG("DROP: Cannot find PMTU entry for %s",
			net_sprint_ipv4_addr(&ip_hdr->src));
		goto silent_drop;
	}

	/* We must not accept a larger PMTU value than what we already know.
	 * RFC 1191 chapter 3 page 5.
	 */
	if (entry->mtu > 0 && entry->mtu < mtu) {
		NET_DBG("DROP: PMTU for %s %u larger than %u",
			net_sprint_ipv4_addr(&ip_hdr->src), mtu,
			entry->mtu);
		goto silent_drop;
	}

	ret = net_pmtu_update_entry(entry, mtu);
	if (ret > 0) {
		NET_DBG("PMTU for %s changed from %u to %u",
			net_sprint_ipv4_addr(&ip_hdr->src), ret, mtu);
	}

	return 0;
drop:
	net_stats_update_ipv4_pmtu_drop(net_pkt_iface(pkt));

	return -EIO;

silent_drop:
	/* If the event is not really an error, just ignore it and return 0
	 * so that the ICMPv4 module will not complain about it.
	 */
	net_stats_update_ipv4_pmtu_drop(net_pkt_iface(pkt));

	return 0;
}

static struct net_icmp_ctx dst_unreach_ctx;
#endif /* CONFIG_NET_IPV4_PMTU */

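/* Register the built-in ICMPv4 handlers: the Echo Request handler and, when
 * CONFIG_NET_IPV4_PMTU is enabled, the Destination Unreachable handler used
 * for Path MTU discovery.
 */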
void net_icmpv4_init(void)
{
	static struct net_icmp_ctx ctx;
	int ret;

	ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REQUEST, 0, icmpv4_handle_echo_request);
	if (ret < 0) {
		NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV4_ECHO_REQUEST),
			ret);
	}

#if defined(CONFIG_NET_IPV4_PMTU)
	ret = net_icmp_init_ctx(&dst_unreach_ctx, NET_ICMPV4_DST_UNREACH, 0,
				icmpv4_handle_dst_unreach);
	if (ret < 0) {
		NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV4_DST_UNREACH),
			ret);
	}
#endif
}