/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2022 Jamie McCrae
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_ipv4, CONFIG_NET_IPV4_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/random/random.h>
#include "net_private.h"
#include "connection.h"
#include "icmpv4.h"
#include "udp_internal.h"
#include "tcp_internal.h"
#include "ipv4.h"
#include "route.h"
#include "net_stats.h"
#include "pmtu.h"

/* Timeout for various buffer allocations in this file. */
#define NET_BUF_TIMEOUT K_MSEC(100)

static void reassembly_timeout(struct k_work *work);

static struct net_ipv4_reassembly reassembly[CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT];

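/* Return the reassembly context matching the given fragment identification
 * (IPv4 ID, source, destination and protocol). If none matches, claim a free
 * slot and start its timeout. Returns NULL when all slots are in use.
 */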
static struct net_ipv4_reassembly *reassembly_get(uint16_t id, const uint8_t *src,
						  const uint8_t *dst, uint8_t protocol)
{
	int i, avail = -1;

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		if (k_work_delayable_remaining_get(&reassembly[i].timer) &&
		    reassembly[i].id == id &&
		    net_ipv4_addr_cmp_raw(src, reassembly[i].src.s4_addr) &&
		    net_ipv4_addr_cmp_raw(dst, reassembly[i].dst.s4_addr) &&
		    reassembly[i].protocol == protocol) {
			return &reassembly[i];
		}

		if (k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		if (avail < 0) {
			avail = i;
		}
	}

	if (avail < 0) {
		return NULL;
	}

	k_work_reschedule(&reassembly[avail].timer, K_SECONDS(CONFIG_NET_IPV4_FRAGMENT_TIMEOUT));

	net_ipv4_addr_copy_raw(reassembly[avail].src.s4_addr, src);
	net_ipv4_addr_copy_raw(reassembly[avail].dst.s4_addr, dst);

	reassembly[avail].protocol = protocol;
	reassembly[avail].id = id;

	return &reassembly[avail];
}

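/* Stop the reassembly matching the given ID and addresses: cancel its timer and
 * release any fragments collected so far. Returns true if a matching entry was found.
 */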
static bool reassembly_cancel(uint32_t id, struct in_addr *src, struct in_addr *dst)
{
	int i, j;

	LOG_DBG("Cancel 0x%x", id);

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		int32_t remaining;

		if (reassembly[i].id != id ||
		    !net_ipv4_addr_cmp(src, &reassembly[i].src) ||
		    !net_ipv4_addr_cmp(dst, &reassembly[i].dst)) {
			continue;
		}

		remaining = k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reassembly[i].timer));
		k_work_cancel_delayable(&reassembly[i].timer);

		LOG_DBG("IPv4 reassembly id 0x%x remaining %d ms", reassembly[i].id, remaining);

		reassembly[i].id = 0U;

		for (j = 0; j < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; j++) {
			if (!reassembly[i].pkt[j]) {
				continue;
			}

			LOG_DBG("[%d] IPv4 reassembly pkt %p %zd bytes data", j,
				reassembly[i].pkt[j], net_pkt_get_len(reassembly[i].pkt[j]));

			net_pkt_unref(reassembly[i].pkt[j]);
			reassembly[i].pkt[j] = NULL;
		}

		return true;
	}

	return false;
}

static void reassembly_info(char *str, struct net_ipv4_reassembly *reass)
{
	LOG_DBG("%s id 0x%x src %s dst %s remain %d ms", str, reass->id,
		net_sprint_ipv4_addr(&reass->src),
		net_sprint_ipv4_addr(&reass->dst),
		k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reass->timer)));
}

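/* Delayed work handler: the reassembly did not complete within
 * CONFIG_NET_IPV4_FRAGMENT_TIMEOUT, so report it and discard the collected fragments.
 */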
static void reassembly_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct net_ipv4_reassembly *reass =
		CONTAINER_OF(dwork, struct net_ipv4_reassembly, timer);

	reassembly_info("Reassembly cancelled", reass);

	/* Send an ICMPv4 Time Exceeded only if we received the first fragment */
	if (reass->pkt[0] && net_pkt_ipv4_fragment_offset(reass->pkt[0]) == 0) {
		net_icmpv4_send_error(reass->pkt[0], NET_ICMPV4_TIME_EXCEEDED,
				      NET_ICMPV4_TIME_EXCEEDED_FRAGMENT_REASSEMBLY_TIME);
	}

	reassembly_cancel(reass->id, &reass->src, &reass->dst);
}

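/* All fragments have been received: strip the IPv4 header from each follow-up
 * fragment, chain the payloads to the first packet, fix up the header of the
 * resulting packet and feed it back into the IP stack.
 */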
static void reassemble_packet(struct net_ipv4_reassembly *reass)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;
	struct net_pkt *pkt;
	struct net_buf *last;
	int i;

	k_work_cancel_delayable(&reass->timer);

	NET_ASSERT(reass->pkt[0]);

	last = net_buf_frag_last(reass->pkt[0]->buffer);

	/* We start from the 2nd packet which is then appended to the first one */
	for (i = 1; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		pkt = reass->pkt[i];
		if (!pkt) {
			break;
		}

		net_pkt_cursor_init(pkt);

		/* Get rid of the IPv4 header which is at the beginning of the fragment. */
		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
		if (!ipv4_hdr) {
			goto error;
		}

		LOG_DBG("Removing %d bytes from start of pkt %p", net_pkt_ip_hdr_len(pkt),
			pkt->buffer);

		if (net_pkt_pull(pkt, net_pkt_ip_hdr_len(pkt))) {
			LOG_ERR("Failed to pull headers");
			reassembly_cancel(reass->id, &reass->src, &reass->dst);
			return;
		}

		/* Attach the data to the previous packet */
		last->frags = pkt->buffer;
		last = net_buf_frag_last(pkt->buffer);

		pkt->buffer = NULL;
		reass->pkt[i] = NULL;

		net_pkt_unref(pkt);
	}

	pkt = reass->pkt[0];
	reass->pkt[0] = NULL;

	/* Update the header details for the packet */
	net_pkt_cursor_init(pkt);

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ipv4_hdr) {
		goto error;
	}

	/* Fix the total length, offset and checksum of the IPv4 packet */
	ipv4_hdr->len = htons(net_pkt_get_len(pkt));
	ipv4_hdr->offset[0] = 0;
	ipv4_hdr->offset[1] = 0;
	ipv4_hdr->chksum = 0;
	ipv4_hdr->chksum = net_calc_chksum_ipv4(pkt);

	net_pkt_set_data(pkt, &ipv4_access);
	net_pkt_set_ip_reassembled(pkt, true);

	LOG_DBG("New pkt %p IPv4 len is %zd bytes", pkt, net_pkt_get_len(pkt));

	/* We need to use the queue when feeding the packet back into the
	 * IP stack as we might run out of stack if we call processing_data()
	 * directly. As the packet does not contain a link layer header, we
	 * MUST NOT pass it to L2, so there will be a special check for that
	 * in process_data() when handling the packet.
	 */
	if (net_recv_data(net_pkt_iface(pkt), pkt) >= 0) {
		return;
	}

error:
	net_pkt_unref(pkt);
}

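/* Call the given callback for every reassembly that is currently active. */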
void net_ipv4_frag_foreach(net_ipv4_frag_cb_t cb, void *user_data)
{
	int i;

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		if (!k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		cb(&reassembly[i], user_data);
	}
}

/* Verify that we have all the fragments received and in correct order.
 * Return:
 * - a negative value if the fragments are erroneous and must be dropped
 * - zero if we are expecting more fragments
 * - a positive value if we can proceed with the reassembly
 */
static int fragments_are_ready(struct net_ipv4_reassembly *reass)
{
	unsigned int expected_offset = 0;
	bool more = true;
	int i;

	/* Fragments can arrive in any order, for example in reverse order:
	 * 1 -> Fragment3(M=0, offset=x2)
	 * 2 -> Fragment2(M=1, offset=x1)
	 * 3 -> Fragment1(M=1, offset=0)
	 * We have to test several requirements before proceeding with the reassembly:
	 * - We received the first fragment (Fragment Offset is 0)
	 * - All intermediate fragments are contiguous
	 * - The More bit of the last fragment is 0
	 */
	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		struct net_pkt *pkt = reass->pkt[i];
		unsigned int offset;
		int payload_len;

		if (!pkt) {
			break;
		}

		offset = net_pkt_ipv4_fragment_offset(pkt);

		if (offset < expected_offset) {
			/* Overlapping or duplicated, drop it */
			return -EBADMSG;
		} else if (offset != expected_offset) {
			/* Not contiguous, let's wait for fragments */
			return 0;
		}

		payload_len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt);

		if (payload_len < 0) {
			return -EBADMSG;
		}

		expected_offset += payload_len;
		more = net_pkt_ipv4_fragment_more(pkt);
	}

	if (more) {
		return 0;
	}

	return 1;
}

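/* Shift the stored fragments one slot forward starting at "pos" to make room for an
 * out-of-order fragment. Returns -ENOMEM if the array has no free slot left.
 */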
static int shift_packets(struct net_ipv4_reassembly *reass, int pos)
{
	int i;

	for (i = pos + 1; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		if (!reass->pkt[i]) {
			LOG_DBG("Moving [%d] %p (offset 0x%x) to [%d]", pos, reass->pkt[pos],
				net_pkt_ipv4_fragment_offset(reass->pkt[pos]), pos + 1);

			/* pkt[i] is free, so shift everything between [pos] and [i - 1] by one
			 * element
			 */
			memmove(&reass->pkt[pos + 1], &reass->pkt[pos],
				sizeof(void *) * (i - pos));

			/* pkt[pos] is now free */
			reass->pkt[pos] = NULL;

			return 0;
		}
	}

	/* We do not have free space left in the array */
	return -ENOMEM;
}

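/* Handle a received IPv4 fragment: store it in the matching reassembly context in
 * offset order and, once all fragments are present, reassemble the original packet.
 */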
enum net_verdict net_ipv4_handle_fragment_hdr(struct net_pkt *pkt, struct net_ipv4_hdr *hdr)
{
	struct net_ipv4_reassembly *reass = NULL;
	uint16_t flag;
	bool found;
	uint8_t more;
	uint16_t id;
	int ret;
	int i;

	flag = ntohs(*((uint16_t *)&hdr->offset));
	id = ntohs(*((uint16_t *)&hdr->id));

	reass = reassembly_get(id, hdr->src, hdr->dst, hdr->proto);
	if (!reass) {
		LOG_ERR("Cannot get reassembly slot, dropping pkt %p", pkt);
		goto drop;
	}

	more = (flag & NET_IPV4_MORE_FRAG_MASK) ? true : false;
	net_pkt_set_ipv4_fragment_flags(pkt, flag);

	if (more && (net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt)) % 8) {
		/* Fragment length is not a multiple of 8, discard the packet and send a
		 * bad IP header error.
		 */
		net_icmpv4_send_error(pkt, NET_ICMPV4_BAD_IP_HEADER,
				      NET_ICMPV4_BAD_IP_HEADER_LENGTH);
		goto drop;
	}

	/* The fragments might come in the wrong order, so place them in the reassembly
	 * chain in the correct order.
	 */
	for (i = 0, found = false; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		if (reass->pkt[i]) {
			if (net_pkt_ipv4_fragment_offset(reass->pkt[i]) <
			    net_pkt_ipv4_fragment_offset(pkt)) {
				continue;
			}

			/* Make room for this fragment. If there is no room, the whole
			 * reassembly is discarded.
			 */
			if (shift_packets(reass, i)) {
				break;
			}
		}

		LOG_DBG("Storing pkt %p to slot %d offset %d", pkt, i,
			net_pkt_ipv4_fragment_offset(pkt));
		reass->pkt[i] = pkt;
		found = true;

		break;
	}

	if (!found) {
		/* We could not add this fragment into our saved fragment list. The whole packet
		 * must be discarded at this point.
		 */
		LOG_ERR("No slots available for 0x%x", reass->id);
		net_pkt_unref(pkt);
		goto drop;
	}

	ret = fragments_are_ready(reass);
	if (ret < 0) {
		LOG_ERR("Reassembled IPv4 verify failed, dropping id %u", reass->id);

		/* Let the caller release the already inserted pkt */
		if (i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT) {
			reass->pkt[i] = NULL;
		}

		net_pkt_unref(pkt);
		goto drop;
	} else if (ret == 0) {
		reassembly_info("Reassembly nth pkt", reass);

		LOG_DBG("More fragments to be received");
		goto accept;
	}

	reassembly_info("Reassembly last pkt", reass);

	/* The last fragment was received, reassemble the packet */
	reassemble_packet(reass);

accept:
	return NET_OK;

drop:
	if (reass) {
		if (reassembly_cancel(reass->id, &reass->src, &reass->dst)) {
			return NET_OK;
		}
	}

	return NET_DROP;
}

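/* Build and send one fragment of the original packet: copy the IPv4 header and
 * "fit_len" bytes of payload starting at "frag_offset", then patch the ID, fragment
 * offset, More Fragments flag, total length and checksum before sending it.
 */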
static int send_ipv4_fragment(struct net_pkt *pkt, uint16_t rand_id, uint16_t fit_len,
			      uint16_t frag_offset, bool final)
{
	int ret = -ENOBUFS;
	struct net_pkt *frag_pkt;
	struct net_pkt_cursor cur;
	struct net_pkt_cursor cur_pkt;
	uint16_t offset_pkt;

	frag_pkt = net_pkt_alloc_with_buffer(net_pkt_iface(pkt), fit_len +
					     net_pkt_ip_hdr_len(pkt),
					     AF_INET, 0, NET_BUF_TIMEOUT);
	if (!frag_pkt) {
		return -ENOMEM;
	}

	net_pkt_cursor_init(frag_pkt);
	net_pkt_cursor_backup(pkt, &cur_pkt);
	net_pkt_cursor_backup(frag_pkt, &cur);

	net_pkt_set_ll_proto_type(frag_pkt, net_pkt_ll_proto_type(pkt));

	/* Copy the original IPv4 headers back to the fragment packet */
	if (net_pkt_copy(frag_pkt, pkt, net_pkt_ip_hdr_len(pkt))) {
		goto fail;
	}

	net_pkt_cursor_restore(pkt, &cur_pkt);

	/* Copy the payload part of this fragment from the original packet */
	if (net_pkt_skip(pkt, (frag_offset + net_pkt_ip_hdr_len(pkt))) ||
	    net_pkt_copy(frag_pkt, pkt, fit_len)) {
		goto fail;
	}

	net_pkt_cursor_restore(frag_pkt, &cur);
	net_pkt_cursor_restore(pkt, &cur_pkt);

	net_pkt_set_ip_hdr_len(frag_pkt, net_pkt_ip_hdr_len(pkt));

	net_pkt_set_overwrite(frag_pkt, true);
	net_pkt_cursor_init(frag_pkt);

	/* Update the header of the packet */
	NET_PKT_DATA_ACCESS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(frag_pkt, &ipv4_access);
	if (!ipv4_hdr) {
		goto fail;
	}

	memcpy(ipv4_hdr->id, &rand_id, sizeof(rand_id));
	offset_pkt = frag_offset / 8;

	if (!final) {
		offset_pkt |= NET_IPV4_MORE_FRAG_MASK;
	}

	sys_put_be16(offset_pkt, ipv4_hdr->offset);
	ipv4_hdr->len = htons((fit_len + net_pkt_ip_hdr_len(pkt)));

	ipv4_hdr->chksum = 0;
	ipv4_hdr->chksum = net_calc_chksum_ipv4(frag_pkt);

	net_pkt_set_chksum_done(frag_pkt, true);

	net_pkt_set_data(frag_pkt, &ipv4_access);

	net_pkt_set_overwrite(frag_pkt, false);
	net_pkt_cursor_restore(frag_pkt, &cur);

	if (final) {
		net_pkt_set_context(frag_pkt, net_pkt_context(pkt));
	}

	/* If everything has been ok so far, we can send the packet. */
	ret = net_send_data(frag_pkt);
	if (ret < 0) {
		goto fail;
	}

	/* Let this packet be sent and hopefully it will release memory that can be
	 * utilized for the next IPv4 fragment.
	 */
	k_yield();

	return 0;

fail:
	LOG_ERR("Cannot send fragment (%d)", ret);
	net_pkt_unref(frag_pkt);

	return ret;
}

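/* Split a packet that exceeds the MTU into fragments and send them. Fails with
 * -EPERM if the Don't Fragment flag is set in the IPv4 header.
 */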
int net_ipv4_send_fragmented_pkt(struct net_if *iface, struct net_pkt *pkt,
				 uint16_t pkt_len, uint16_t mtu)
{
	uint16_t frag_offset = 0;
	uint16_t flag;
	int fit_len;
	int ret;
	struct net_ipv4_hdr *frag_hdr;

	NET_PKT_DATA_ACCESS_DEFINE(frag_access, struct net_ipv4_hdr);
	frag_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &frag_access);
	if (!frag_hdr) {
		return -EINVAL;
	}

	/* Check if the DF (Don't Fragment) flag is set; if so, we cannot fragment the packet */
	flag = ntohs(*((uint16_t *)&frag_hdr->offset));

	if (flag & NET_IPV4_DO_NOT_FRAG_MASK) {
		/* This packet cannot be fragmented */
		return -EPERM;
	}

	/* Generate a random ID to be used for packet identification, ensuring that it is not 0 */
	uint16_t rand_id = sys_rand16_get();

	if (rand_id == 0) {
		rand_id = 1;
	}

	/* Calculate the maximum payload that can fit into each packet after the IPv4 header.
	 * Offsets are multiples of 8, therefore round down to the nearest 8-byte boundary.
	 */
	fit_len = (mtu - net_pkt_ip_hdr_len(pkt)) / 8;

	if (fit_len <= 0) {
		LOG_ERR("No room for IPv4 payload MTU %d hdrs_len %d", mtu,
			net_pkt_ip_hdr_len(pkt));
		return -EINVAL;
	}

	fit_len *= 8;

	pkt_len -= net_pkt_ip_hdr_len(pkt);

	/* Calculate the L4 checksum (if not done already) before the fragmentation. */
	if (!net_pkt_is_chksum_done(pkt)) {
		struct net_pkt_cursor backup;

		net_pkt_cursor_backup(pkt, &backup);
		net_pkt_acknowledge_data(pkt, &frag_access);

		switch (frag_hdr->proto) {
		case IPPROTO_ICMP:
			ret = net_icmpv4_finalize(pkt, true);
			break;
		case IPPROTO_TCP:
			ret = net_tcp_finalize(pkt, true);
			break;
		case IPPROTO_UDP:
			ret = net_udp_finalize(pkt, true);
			break;
		default:
			ret = 0;
			break;
		}

		if (ret < 0) {
			return ret;
		}

		net_pkt_cursor_restore(pkt, &backup);
	}

	while (frag_offset < pkt_len) {
		bool final = false;

		if ((frag_offset + fit_len) >= pkt_len) {
			final = true;
			fit_len = (pkt_len - frag_offset);
		}

		ret = send_ipv4_fragment(pkt, rand_id, fit_len, frag_offset, final);
		if (ret < 0) {
			return ret;
		}

		frag_offset += fit_len;
	}

	return 0;
}

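/* Called on the send path: if the packet has not been fragmented yet and exceeds the
 * (path) MTU, split it into fragments and send those instead of the original packet.
 */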
enum net_verdict net_ipv4_prepare_for_send_fragment(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ip_hdr;
	int ret;

	NET_ASSERT(pkt && pkt->buffer);

	ip_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ip_hdr) {
		return NET_DROP;
	}

	/* If we have already fragmented the packet, the ID field will contain a non-zero value
	 * and we can skip other checks.
	 */
	if (ip_hdr->id[0] == 0 && ip_hdr->id[1] == 0) {
		size_t pkt_len = net_pkt_get_len(pkt);
		uint16_t mtu;

		if (IS_ENABLED(CONFIG_NET_IPV4_PMTU)) {
			struct sockaddr_in dst = {
				.sin_family = AF_INET,
				.sin_addr = *((struct in_addr *)ip_hdr->dst),
			};

			ret = net_pmtu_get_mtu((struct sockaddr *)&dst);
			if (ret <= 0) {
				goto use_interface_mtu;
			}

			mtu = ret;
		} else {
use_interface_mtu:
			mtu = net_if_get_mtu(net_pkt_iface(pkt));
			mtu = MAX(NET_IPV4_MTU, mtu);
		}

		if (pkt_len > mtu) {
			ret = net_ipv4_send_fragmented_pkt(net_pkt_iface(pkt), pkt, pkt_len, mtu);

			if (ret < 0) {
				LOG_DBG("Cannot fragment IPv4 pkt (%d)", ret);

				if (ret == -EPERM) {
					/* Try to send the packet if the don't fragment flag is set
					 * and hope the original large packet can be sent OK.
					 */
					goto ignore_frag_error;
				}

				/* Other error, drop the packet */
				return NET_DROP;
			}

			/* We need to unref here because we simulate the packet being sent. */
			net_pkt_unref(pkt);

			/* No need to continue with the sending as the packet is now split and
			 * its fragments will be sent separately to the network.
			 */
			return NET_CONTINUE;
		}
	}

ignore_frag_error:

	return NET_OK;
}

void net_ipv4_setup_fragment_buffers(void)
{
	/* Static initialising does not work here because of the array, so we must do it at
	 * runtime.
	 */
	for (int i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		k_work_init_delayable(&reassembly[i].timer, reassembly_timeout);
	}
}