/** @file
 * @brief ARP related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_arp, CONFIG_NET_ARP_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_mgmt.h>

#include "arp.h"
#include "ipv4.h"
#include "net_private.h"

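/* NET_BUF_TIMEOUT bounds net_pkt/net_buf allocations below, while
 * ARP_REQUEST_TIMEOUT is how long a request may stay pending before the
 * entry is expired and its queued packets are dropped.
 */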
#define NET_BUF_TIMEOUT K_MSEC(100)
#define ARP_REQUEST_TIMEOUT (2 * MSEC_PER_SEC)

static bool arp_cache_initialized;
static struct arp_entry arp_entries[CONFIG_NET_ARP_TABLE_SIZE];

static sys_slist_t arp_free_entries;
static sys_slist_t arp_pending_entries;
static sys_slist_t arp_table;

static struct k_work_delayable arp_request_timer;

static struct k_mutex arp_mutex;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static struct net_mgmt_event_callback iface_event_cb;
static struct net_mgmt_event_callback ipv4_event_cb;
static struct k_work_delayable arp_gratuitous_work;
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

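/* Reset an entry so it can be reused: drop every packet still queued on a
 * pending entry and clear the interface, IP and hardware address fields.
 */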
static void arp_entry_cleanup(struct arp_entry *entry, bool pending)
{
	NET_DBG("entry %p", entry);

	if (pending) {
		struct net_pkt *pkt;

		while (!k_fifo_is_empty(&entry->pending_queue)) {
			pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);
			NET_DBG("Releasing pending pkt %p (ref %ld)",
				pkt,
				atomic_get(&pkt->atomic_ref) - 1);
			net_pkt_unref(pkt);
		}
	}

	entry->iface = NULL;

	(void)memset(&entry->ip, 0, sizeof(struct in_addr));
	(void)memset(&entry->eth, 0, sizeof(struct net_eth_addr));
}

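/* Linear search over one of the entry lists. If previous is given, it is
 * updated to point to the node preceding the match so the caller can
 * remove the entry with sys_slist_remove().
 */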
static struct arp_entry *arp_entry_find(sys_slist_t *list,
					struct net_if *iface,
					struct in_addr *dst,
					sys_snode_t **previous)
{
	struct arp_entry *entry;

	SYS_SLIST_FOR_EACH_CONTAINER(list, entry, node) {
		NET_DBG("iface %d (%p) dst %s",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip));

		if (entry->iface == iface &&
		    net_ipv4_addr_cmp(&entry->ip, dst)) {
			NET_DBG("found dst %s",
				net_sprint_ipv4_addr(dst));

			return entry;
		}

		if (previous) {
			*previous = &entry->node;
		}
	}

	return NULL;
}

static inline struct arp_entry *arp_entry_find_move_first(struct net_if *iface,
							  struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_table, iface, dst, &prev);
	if (entry) {
		/* Assume the target is going to be accessed again within a
		 * short time frame, so move the entry to the front of the
		 * table to speed up subsequent lookups.
		 */
		if (&entry->node != sys_slist_peek_head(&arp_table)) {
			sys_slist_remove(&arp_table, prev, &entry->node);
			sys_slist_prepend(&arp_table, &entry->node);
		}
	}

	return entry;
}

static inline
struct arp_entry *arp_entry_find_pending(struct net_if *iface,
					 struct in_addr *dst)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	return arp_entry_find(&arp_pending_entries, iface, dst, NULL);
}

static struct arp_entry *arp_entry_get_pending(struct net_if *iface,
					       struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_pending_entries, iface, dst, &prev);
	if (entry) {
		/* We remove the entry from the pending list */
		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	return entry;
}

static struct arp_entry *arp_entry_get_free(void)
{
	sys_snode_t *node;

	node = sys_slist_peek_head(&arp_free_entries);
	if (!node) {
		return NULL;
	}

	/* We remove the node from the free list */
	sys_slist_remove(&arp_free_entries, NULL, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static struct arp_entry *arp_entry_get_last_from_table(void)
{
	sys_snode_t *node;

	/* We assume the last entry is the oldest one, so it is the
	 * preferred one to evict.
	 */
	node = sys_slist_peek_tail(&arp_table);
	if (!node) {
		return NULL;
	}

	sys_slist_find_and_remove(&arp_table, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static void arp_entry_register_pending(struct arp_entry *entry)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(&entry->ip));

	sys_slist_append(&arp_pending_entries, &entry->node);

	entry->req_start = k_uptime_get_32();

	/* Let's start the timer if necessary */
	if (!k_work_delayable_remaining_get(&arp_request_timer)) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(ARP_REQUEST_TIMEOUT));
	}
}

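/* Pending entries are appended in request order, so the list is sorted by
 * req_start. Expire entries from the head until the first one that has
 * not timed out yet, then reschedule the timer for that entry.
 */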
static void arp_request_timeout(struct k_work *work)
{
	uint32_t current = k_uptime_get_32();
	struct arp_entry *entry, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if ((int32_t)(entry->req_start +
			      ARP_REQUEST_TIMEOUT - current) > 0) {
			break;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, NULL, &entry->node);
		sys_slist_append(&arp_free_entries, &entry->node);

		entry = NULL;
	}

	if (entry) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(entry->req_start +
					 ARP_REQUEST_TIMEOUT - current));
	}

	k_mutex_unlock(&arp_mutex);
}

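/* Return a preferred IPv4 unicast address of the interface. If addr is
 * NULL the first preferred address is returned, otherwise only an address
 * matching the raw (network byte order) addr bytes matches.
 */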
static inline struct in_addr *if_get_addr(struct net_if *iface,
					  const uint8_t *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (!ipv4) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED &&
		    (!addr ||
		     net_ipv4_addr_cmp_raw(
			     addr, ipv4->unicast[i].ipv4.address.in_addr.s4_addr))) {
			return &ipv4->unicast[i].ipv4.address.in_addr;
		}
	}

	return NULL;
}

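/* Build an ARP request for next_addr. If an entry is given, the pending
 * packet is queued on it (reference counted) and the entry is registered
 * as pending; without an entry the request is simply rebuilt from the
 * data in the pending packet. For IPv4 ACD the pending packet itself is
 * reused instead of allocating a new one.
 */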
static inline struct net_pkt *arp_prepare(struct net_if *iface,
					  struct in_addr *next_addr,
					  struct arp_entry *entry,
					  struct net_pkt *pending,
					  struct in_addr *current_ip)
{
	struct net_arp_hdr *hdr;
	struct in_addr *my_addr;
	struct net_pkt *pkt;

	if (current_ip) {
		/* This is the IPv4 autoconf case where everything is already
		 * set up, so there is no need to allocate a new net_pkt.
		 */
		pkt = pending;
	} else {
		pkt = net_pkt_alloc_with_buffer(iface,
						sizeof(struct net_arp_hdr),
						AF_UNSPEC, 0, NET_BUF_TIMEOUT);
		if (!pkt) {
			return NULL;
		}

		/* Avoid recursive loop with network packet capturing */
		if (IS_ENABLED(CONFIG_NET_CAPTURE) && pending) {
			net_pkt_set_captured(pkt, net_pkt_is_captured(pending));
		}

		if (IS_ENABLED(CONFIG_NET_VLAN) && pending) {
			net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(pending));
		}
	}

	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);
	net_pkt_set_family(pkt, AF_INET);

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);

	/* If entry is not set, then we are just about to send
	 * an ARP request using the data in pending net_pkt.
	 * This can happen if there is already a pending ARP
	 * request and we want to send it again.
	 */
	if (entry) {
		if (!net_pkt_ipv4_acd(pkt)) {
			net_pkt_ref(pending);
			k_fifo_put(&entry->pending_queue, pending);
		}

		entry->iface = net_pkt_iface(pkt);

		net_ipaddr_copy(&entry->ip, next_addr);

		(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
				       net_if_get_link_addr(entry->iface)->addr,
				       sizeof(struct net_eth_addr));

		arp_entry_register_pending(entry);
	} else {
		(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
				       net_if_get_link_addr(iface)->addr,
				       sizeof(struct net_eth_addr));
	}

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (const uint8_t *)net_eth_broadcast_addr(),
			       sizeof(struct net_eth_addr));

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	(void)memset(&hdr->dst_hwaddr.addr, 0x00, sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)next_addr);

	memcpy(hdr->src_hwaddr.addr, net_pkt_lladdr_src(pkt)->addr,
	       sizeof(struct net_eth_addr));

	if (net_pkt_ipv4_acd(pkt)) {
		my_addr = current_ip;
	} else if (!entry) {
		my_addr = (struct in_addr *)NET_IPV4_HDR(pending)->src;
	} else {
		my_addr = if_get_addr(entry->iface, (const uint8_t *)current_ip);
	}

	if (my_addr) {
		net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)my_addr);
	} else {
		(void)memset(&hdr->src_ipaddr, 0, sizeof(struct in_addr));
	}

	NET_DBG("Generating request for %s", net_sprint_ipv4_addr(next_addr));
	return pkt;
}

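/* Resolve the link layer address for the packet. Returns NET_ARP_COMPLETE
 * when the address was found in the cache and set into the packet,
 * NET_ARP_PKT_QUEUED when the packet was queued behind an already pending
 * request, NET_ARP_PKT_REPLACED when an ARP request was generated into
 * *arp_pkt, or a negative errno on failure.
 */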
int net_arp_prepare(struct net_pkt *pkt,
		    struct in_addr *request_ip,
		    struct in_addr *current_ip,
		    struct net_pkt **arp_pkt)
{
	bool is_ipv4_ll_used = false;
	struct arp_entry *entry;
	struct in_addr *addr;

	if (!pkt || !pkt->buffer) {
		return -EINVAL;
	}

	if (net_pkt_ipv4_acd(pkt)) {
		*arp_pkt = arp_prepare(net_pkt_iface(pkt), request_ip, NULL,
				       pkt, current_ip);
		return *arp_pkt ? NET_ARP_PKT_REPLACED : -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_AUTO)) {
		is_ipv4_ll_used = net_ipv4_is_ll_addr_raw(NET_IPV4_HDR(pkt)->src) ||
				  net_ipv4_is_ll_addr_raw(NET_IPV4_HDR(pkt)->dst);
	}

	/* If the destination is not in the local network, route via
	 * the gateway address.
	 */
	if (!current_ip && !is_ipv4_ll_used &&
	    !net_if_ipv4_addr_mask_cmp(net_pkt_iface(pkt), request_ip)) {
		struct net_if_ipv4 *ipv4 = net_pkt_iface(pkt)->config.ip.ipv4;

		if (ipv4) {
			addr = &ipv4->gw;
			if (net_ipv4_is_addr_unspecified(addr)) {
				NET_ERR("Gateway not set for iface %d, could not "
					"send ARP request for %s",
					net_if_get_by_iface(net_pkt_iface(pkt)),
					net_sprint_ipv4_addr(request_ip));

				return -EINVAL;
			}
		} else {
			addr = request_ip;
		}
	} else {
		addr = request_ip;
	}

	k_mutex_lock(&arp_mutex, K_FOREVER);

	/* If the destination address is already known, we do not need
	 * to send any ARP packet.
	 */
	entry = arp_entry_find_move_first(net_pkt_iface(pkt), addr);
	if (!entry) {
		struct net_pkt *req;

		entry = arp_entry_find_pending(net_pkt_iface(pkt), addr);
		if (!entry) {
			/* No pending, let's try to get a new entry */
			entry = arp_entry_get_free();
			if (!entry) {
				/* No free entry, evict the oldest one
				 * from the table.
				 */
				entry = arp_entry_get_last_from_table();
			}
		} else {
			/* There is already a pending ARP request. If this
			 * packet is already in the pending queue, resend the
			 * request; otherwise just append the packet to the
			 * request FIFO. Take an extra packet reference to
			 * account for the queue holding it.
			 */
			pkt = net_pkt_ref(pkt);
			if (k_queue_unique_append(&entry->pending_queue._queue, pkt)) {
				NET_DBG("Pending ARP request for %s, queuing pkt %p",
					net_sprint_ipv4_addr(addr), pkt);
				k_mutex_unlock(&arp_mutex);
				return NET_ARP_PKT_QUEUED;
			}

			/* Queueing the packet failed, undo the net_pkt_ref */
			net_pkt_unref(pkt);
			entry = NULL;
		}

		req = arp_prepare(net_pkt_iface(pkt), addr, entry, pkt,
				  current_ip);

		if (!entry) {
			/* We could not store the packet in an entry: either
			 * all entries are in use or this packet was already
			 * queued for this IP address. In both cases the ARP
			 * request is simply sent again using the data in the
			 * pending packet.
			 */
			NET_DBG("Resending ARP %p", req);
		}

		if (!req && entry) {
			/* Put the ARP entry back on arp_free_entries so that
			 * it is not leaked when the ARP packet allocation
			 * fails.
			 */
			sys_slist_prepend(&arp_free_entries, &entry->node);
		}

		k_mutex_unlock(&arp_mutex);
		*arp_pkt = req;
		return req ? NET_ARP_PKT_REPLACED : -ENOMEM;
	}

	k_mutex_unlock(&arp_mutex);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(entry->iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (const uint8_t *)&entry->eth, sizeof(struct net_eth_addr));

	NET_DBG("ARP using ll %s for IP %s",
		net_sprint_ll_addr(net_pkt_lladdr_dst(pkt)->addr,
				   sizeof(struct net_eth_addr)),
		net_sprint_ipv4_addr(NET_IPV4_HDR(pkt)->dst));

	return NET_ARP_COMPLETE;
}

static void arp_gratuitous(struct net_if *iface,
			   struct in_addr *src,
			   struct net_eth_addr *hwaddr)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	entry = arp_entry_find(&arp_table, iface, src, &prev);
	if (entry) {
		NET_DBG("Gratuitous ARP hwaddr %s -> %s",
			net_sprint_ll_addr((const uint8_t *)&entry->eth,
					   sizeof(struct net_eth_addr)),
			net_sprint_ll_addr((const uint8_t *)hwaddr,
					   sizeof(struct net_eth_addr)));

		memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));
	}
}

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
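/* Broadcast a gratuitous ARP request where the source and destination IP
 * addresses are both our own address, so peers can update their caches
 * with our hardware address.
 */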
static void arp_gratuitous_send(struct net_if *iface,
				struct in_addr *ipaddr)
{
	struct net_arp_hdr *hdr;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));
	net_pkt_set_vlan_tag(pkt, net_eth_get_vlan_tag(iface));
	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);

	hdr = NET_ARP_HDR(pkt);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	memcpy(&hdr->dst_hwaddr.addr, net_eth_broadcast_addr(),
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)ipaddr);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (uint8_t *)net_eth_broadcast_addr(),
			       sizeof(struct net_eth_addr));

	NET_DBG("Sending gratuitous ARP pkt %p", pkt);

	/* send without timeout, so we do not risk being blocked by tx when
	 * being flooded
	 */
	if (net_if_try_send_data(iface, pkt, K_NO_WAIT) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

static void notify_all_ipv4_addr(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (!ipv4) {
		return;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED) {
			arp_gratuitous_send(iface,
					    &ipv4->unicast[i].ipv4.address.in_addr);
		}
	}
}

static void iface_event_handler(struct net_mgmt_event_callback *cb,
				uint64_t mgmt_event, struct net_if *iface)
{
	ARG_UNUSED(cb);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (mgmt_event != NET_EVENT_IF_UP) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void ipv4_event_handler(struct net_mgmt_event_callback *cb,
			       uint64_t mgmt_event, struct net_if *iface)
{
	struct in_addr *ipaddr;

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	if (mgmt_event != NET_EVENT_IPV4_ADDR_ADD) {
		return;
	}

	if (cb->info_length != sizeof(struct in_addr)) {
		return;
	}

	ipaddr = (struct in_addr *)cb->info;

	arp_gratuitous_send(iface, ipaddr);
}

static void iface_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void arp_gratuitous_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	net_if_foreach(iface_cb, NULL);

	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
}
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

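/* Record a learned (IP address, hardware address) mapping. If a request
 * was pending for the address, the entry is moved to the table and its
 * queued packets are sent out; gratuitous updates and forced inserts only
 * touch the table itself.
 */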
void net_arp_update(struct net_if *iface,
		    struct in_addr *src,
		    struct net_eth_addr *hwaddr,
		    bool gratuitous,
		    bool force)
{
	struct arp_entry *entry;
	struct net_pkt *pkt;

	NET_DBG("iface %d (%p) src %s", net_if_get_by_iface(iface), iface,
		net_sprint_ipv4_addr(src));

	net_if_tx_lock(iface);
	k_mutex_lock(&arp_mutex, K_FOREVER);

	entry = arp_entry_get_pending(iface, src);
	if (!entry) {
		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS) && gratuitous) {
			arp_gratuitous(iface, src, hwaddr);
		}

		if (force) {
			sys_snode_t *prev = NULL;
			struct arp_entry *arp_ent;

			arp_ent = arp_entry_find(&arp_table, iface, src, &prev);
			if (arp_ent) {
				memcpy(&arp_ent->eth, hwaddr,
				       sizeof(struct net_eth_addr));
			} else {
				/* Add new entry as it was not found and force
				 * was set.
				 */
				arp_ent = arp_entry_get_free();
				if (!arp_ent) {
					/* No free entry, evict the oldest
					 * one from the table.
					 */
					arp_ent = arp_entry_get_last_from_table();
				}

				if (arp_ent) {
					arp_ent->req_start = k_uptime_get_32();
					arp_ent->iface = iface;
					net_ipaddr_copy(&arp_ent->ip, src);
					memcpy(&arp_ent->eth, hwaddr, sizeof(arp_ent->eth));
					sys_slist_prepend(&arp_table, &arp_ent->node);
				}
			}
		}

		k_mutex_unlock(&arp_mutex);
		net_if_tx_unlock(iface);
		return;
	}

	memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));

	/* Inserting entry into the table */
	sys_slist_prepend(&arp_table, &entry->node);

	while (!k_fifo_is_empty(&entry->pending_queue)) {
		int ret;

		pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);

		/* Set the dst in the pending packet */
		(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
				       (const uint8_t *)&NET_ETH_HDR(pkt)->dst.addr,
				       sizeof(struct net_eth_addr));

		NET_DBG("iface %d (%p) dst %s pending %p frag %p ptype 0x%04x",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip),
			pkt, pkt->frags, net_pkt_ll_proto_type(pkt));

		/* We directly send the packet without first queueing it.
		 * The pkt has already been queued for sending, once by
		 * net_if and a second time in the ARP queue. We must not
		 * queue it twice in net_if so that the statistics of
		 * the pkt are not counted twice and the packet filter
		 * callbacks are only called once.
		 */
		ret = net_if_l2(iface)->send(iface, pkt);
		if (ret < 0) {
			net_pkt_unref(pkt);
		}
	}

	k_mutex_unlock(&arp_mutex);
	net_if_tx_unlock(iface);
}

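/* Build an ARP reply by swapping the IP addresses from the query and
 * filling in our own hardware address as the source.
 */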
static inline struct net_pkt *arp_prepare_reply(struct net_if *iface,
						struct net_pkt *req,
						struct net_eth_addr *dst_addr)
{
	struct net_arp_hdr *hdr, *query;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);
	query = NET_ARP_HDR(req);

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(req));
	}

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REPLY);

	memcpy(&hdr->dst_hwaddr.addr, &dst_addr->addr,
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, query->src_ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, query->dst_ipaddr);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (uint8_t *)&hdr->dst_hwaddr.addr,
			       sizeof(struct net_eth_addr));

	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);
	net_pkt_set_family(pkt, AF_INET);

	return pkt;
}

static bool arp_hdr_check(struct net_arp_hdr *arp_hdr)
{
	if (ntohs(arp_hdr->hwtype) != NET_ARP_HTYPE_ETH ||
	    ntohs(arp_hdr->protocol) != NET_ETH_PTYPE_IP ||
	    arp_hdr->hwlen != sizeof(struct net_eth_addr) ||
	    arp_hdr->protolen != NET_ARP_IPV4_PTYPE_SIZE ||
	    net_ipv4_is_addr_loopback_raw(arp_hdr->src_ipaddr)) {
		NET_DBG("DROP: Invalid ARP header");
		return false;
	}

	return true;
}

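/* Handle a received ARP packet: answer requests that target one of our
 * addresses, and learn mappings from replies (and from requests when the
 * sender information has changed).
 */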
enum net_verdict net_arp_input(struct net_pkt *pkt,
			       struct net_eth_addr *src,
			       struct net_eth_addr *dst)
{
	struct net_eth_addr *dst_hw_addr;
	struct net_arp_hdr *arp_hdr;
	struct in_addr src_ipaddr;
	struct net_pkt *reply;
	struct in_addr *addr;

	if (net_pkt_get_len(pkt) < sizeof(struct net_arp_hdr)) {
		NET_DBG("DROP: Too short ARP msg (%zu bytes, min %zu bytes)",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr));
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);
	if (!arp_hdr_check(arp_hdr)) {
		return NET_DROP;
	}

	switch (ntohs(arp_hdr->opcode)) {
	case NET_ARP_REQUEST:
		/* If ARP request sender hw address is our address,
		 * we must drop the packet.
		 */
		if (memcmp(&arp_hdr->src_hwaddr,
			   net_if_get_link_addr(net_pkt_iface(pkt))->addr,
			   sizeof(struct net_eth_addr)) == 0) {
			return NET_DROP;
		}

		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS)) {
			if (net_eth_is_addr_broadcast(dst) &&
			    (net_eth_is_addr_broadcast(&arp_hdr->dst_hwaddr) ||
			     net_eth_is_addr_all_zeroes(&arp_hdr->dst_hwaddr)) &&
			    net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
						  arp_hdr->src_ipaddr)) {
				/* If the IP address is in our cache,
				 * then update it here.
				 */
				net_ipv4_addr_copy_raw(src_ipaddr.s4_addr,
						       arp_hdr->src_ipaddr);
				net_arp_update(net_pkt_iface(pkt), &src_ipaddr,
					       &arp_hdr->src_hwaddr,
					       true, false);
				break;
			}
		}

		/* Discard the ARP request if the Ethernet destination
		 * address is broadcast and the source IP address is
		 * multicast.
		 */
		if (memcmp(dst, net_eth_broadcast_addr(),
			   sizeof(struct net_eth_addr)) == 0 &&
		    net_ipv4_is_addr_mcast_raw(arp_hdr->src_ipaddr)) {
			NET_DBG("DROP: eth addr is bcast, src addr is mcast");
			return NET_DROP;
		}

		/* Someone wants to know our ll address */
		addr = if_get_addr(net_pkt_iface(pkt), arp_hdr->dst_ipaddr);
		if (!addr) {
			/* Not for us so drop the packet silently */
			return NET_DROP;
		}

		NET_DBG("ARP request from %s [%s] for %s",
			net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
			net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
					   arp_hdr->hwlen),
			net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));

		/* Update the ARP cache if the sender MAC address has
		 * changed. In this case the target MAC address is all zeros
		 * and the target IP address is our address.
		 */
		if (net_eth_is_addr_unspecified(&arp_hdr->dst_hwaddr)) {
			NET_DBG("Updating ARP cache for %s [%s] iface %d",
				net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_if_get_by_iface(net_pkt_iface(pkt)));

			net_ipv4_addr_copy_raw(src_ipaddr.s4_addr,
					       arp_hdr->src_ipaddr);
			net_arp_update(net_pkt_iface(pkt), &src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, true);

			dst_hw_addr = &arp_hdr->src_hwaddr;
		} else {
			dst_hw_addr = src;
		}

		/* Send reply */
		reply = arp_prepare_reply(net_pkt_iface(pkt), pkt, dst_hw_addr);
		if (reply) {
			net_if_try_queue_tx(net_pkt_iface(reply), reply, K_NO_WAIT);
		} else {
			NET_DBG("Cannot send ARP reply");
		}
		break;

	case NET_ARP_REPLY:
		if (net_ipv4_is_my_addr_raw(arp_hdr->dst_ipaddr)) {
			NET_DBG("Received ll %s for IP %s",
				net_sprint_ll_addr(arp_hdr->src_hwaddr.addr,
						   sizeof(struct net_eth_addr)),
				net_sprint_ipv4_addr(arp_hdr->src_ipaddr));

			net_ipv4_addr_copy_raw(src_ipaddr.s4_addr,
					       arp_hdr->src_ipaddr);
			net_arp_update(net_pkt_iface(pkt), &src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, false);
		}

		break;
	}

	net_pkt_unref(pkt);

	return NET_OK;
}

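/* Flush both the resolved and the pending entries, either for a single
 * interface or, when iface is NULL, for all interfaces.
 */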
void net_arp_clear_cache(struct net_if *iface)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry, *next;

	NET_DBG("Flushing ARP table");

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_table, entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, false);

		sys_slist_remove(&arp_table, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	prev = NULL;

	NET_DBG("Flushing ARP pending requests");

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	k_mutex_unlock(&arp_mutex);
}

int net_arp_clear_pending(struct net_if *iface, struct in_addr *dst)
{
	struct arp_entry *entry = arp_entry_find_pending(iface, dst);

	if (!entry) {
		return -ENOENT;
	}

	arp_entry_cleanup(entry, true);

	return 0;
}

int net_arp_foreach(net_arp_cb_t cb, void *user_data)
{
	int ret = 0;
	struct arp_entry *entry;

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER(&arp_table, entry, node) {
		ret++;
		cb(entry, user_data);
	}

	k_mutex_unlock(&arp_mutex);

	return ret;
}
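
/* A minimal usage sketch for the iterator above (not part of this file;
 * "print_cache_entry" is a hypothetical callback name). The callback
 * signature follows net_arp_cb_t from arp.h:
 *
 *   static void print_cache_entry(struct arp_entry *entry, void *user_data)
 *   {
 *           ARG_UNUSED(user_data);
 *
 *           NET_DBG("%s is at %s",
 *                   net_sprint_ipv4_addr(&entry->ip),
 *                   net_sprint_ll_addr((uint8_t *)&entry->eth,
 *                                      sizeof(struct net_eth_addr)));
 *   }
 *
 *   int count = net_arp_foreach(print_cache_entry, NULL);
 */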

void net_arp_init(void)
{
	int i;

	if (arp_cache_initialized) {
		return;
	}

	sys_slist_init(&arp_free_entries);
	sys_slist_init(&arp_pending_entries);
	sys_slist_init(&arp_table);

	for (i = 0; i < CONFIG_NET_ARP_TABLE_SIZE; i++) {
		/* Inserting entry as free with initialised packet queue */
		k_fifo_init(&arp_entries[i].pending_queue);
		sys_slist_prepend(&arp_free_entries, &arp_entries[i].node);
	}

	k_work_init_delayable(&arp_request_timer, arp_request_timeout);

	k_mutex_init(&arp_mutex);

	arp_cache_initialized = true;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
	net_mgmt_init_event_callback(&iface_event_cb, iface_event_handler,
				     NET_EVENT_IF_UP);
	net_mgmt_init_event_callback(&ipv4_event_cb, ipv4_event_handler,
				     NET_EVENT_IPV4_ADDR_ADD);

	net_mgmt_add_event_callback(&iface_event_cb);
	net_mgmt_add_event_callback(&ipv4_event_cb);

	k_work_init_delayable(&arp_gratuitous_work,
			      arp_gratuitous_work_handler);
	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */
}

static enum net_verdict arp_recv(struct net_if *iface,
				 uint16_t ptype,
				 struct net_pkt *pkt)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(ptype);

	net_pkt_set_family(pkt, AF_INET);

	NET_DBG("ARP packet from %s received",
		net_sprint_ll_addr(net_pkt_lladdr_src(pkt)->addr,
				   sizeof(struct net_eth_addr)));

	if (IS_ENABLED(CONFIG_NET_IPV4_ACD) &&
	    net_ipv4_acd_input(iface, pkt) == NET_DROP) {
		return NET_DROP;
	}

	return net_arp_input(pkt,
			     (struct net_eth_addr *)net_pkt_lladdr_src(pkt)->addr,
			     (struct net_eth_addr *)net_pkt_lladdr_dst(pkt)->addr);
}

ETH_NET_L3_REGISTER(ARP, NET_ETH_PTYPE_ARP, arp_recv);