/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include "conn_mgr_private.h"

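/* Callback handles registered with the net_mgmt subsystem at init time. */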
static struct net_mgmt_event_callback iface_events_cb;
static struct net_mgmt_event_callback ipv6_events_cb;
static struct net_mgmt_event_callback ipv4_events_cb;

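/*
 * Handles NET_EVENT_IF_UP / NET_EVENT_IF_DOWN events: updates the
 * CONN_MGR_IF_UP bit in the per-iface state table and wakes the monitor
 * thread so overall connectivity can be re-evaluated.
 */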
static void conn_mgr_iface_events_handler(struct net_mgmt_event_callback *cb,
					  uint64_t mgmt_event,
					  struct net_if *iface)
{
	uint16_t *iface_states = conn_mgr_if_state_internal();
	int idx;

	NET_DBG("%s event 0x%" PRIx64 " received on iface %d (%p)", "Iface", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IFACE_EVENTS_MASK) != mgmt_event) {
		return;
	}

	/* Iface indices are 1-based; the state table is 0-based. */
	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IF_CMD_DOWN:
		iface_states[idx] &= ~CONN_MGR_IF_UP;
		break;
	case NET_EVENT_IF_CMD_UP:
		iface_states[idx] |= CONN_MGR_IF_UP;
		break;
	default:
		goto done;
	}

	/* Wake the monitor thread to re-evaluate connectivity. */
	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}

#if defined(CONFIG_NET_IPV6)
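/*
 * Handles IPv6 address events (add/delete, DAD success/failure): sets or
 * clears the CONN_MGR_IF_IPV6_SET bit depending on whether the iface still
 * has a preferred global IPv6 address, then wakes the monitor thread.
 */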
static void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
					 uint64_t mgmt_event,
					 struct net_if *iface)
{
	uint16_t *iface_states = conn_mgr_if_state_internal();
	int idx;

	NET_DBG("%s event 0x%" PRIx64 " received on iface %d (%p)", "IPv6", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV6_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV6_CMD_DAD_SUCCEED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_ADD:
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
		break;
	case NET_EVENT_IPV6_CMD_DAD_FAILED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_DEL:
		/* Only clear the flag if no preferred global address remains. */
		if (!net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] &= ~CONN_MGR_IF_IPV6_SET;
		}
		break;
	default:
		goto done;
	}

	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
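/* No-op stub so references to the handler compile out cleanly when
 * CONFIG_NET_IPV6 is disabled.
 */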
static inline
void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
				  uint64_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV6 */

#if defined(CONFIG_NET_IPV4)
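/*
 * Handles IPv4 address events (add/delete, ACD success/failure): sets or
 * clears the CONN_MGR_IF_IPV4_SET bit depending on whether the iface still
 * has a preferred global IPv4 address, then wakes the monitor thread.
 */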
static void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
					 uint64_t mgmt_event,
					 struct net_if *iface)
{
	uint16_t *iface_states = conn_mgr_if_state_internal();
	int idx;

	NET_DBG("%s event 0x%" PRIx64 " received on iface %d (%p)", "IPv4", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV4_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV4_CMD_ACD_SUCCEED:
		__fallthrough;
	case NET_EVENT_IPV4_CMD_ADDR_ADD:
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		}
		break;
	case NET_EVENT_IPV4_CMD_ACD_FAILED:
		__fallthrough;
	case NET_EVENT_IPV4_CMD_ADDR_DEL:
		/* Only clear the flag if no preferred global address remains. */
		if (!net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			iface_states[idx] &= ~CONN_MGR_IF_IPV4_SET;
		}
		break;
	default:
		goto done;
	}

	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
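/* No-op stub so references to the handler compile out cleanly when
 * CONFIG_NET_IPV4 is disabled.
 */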
static inline
void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
				  uint64_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV4 */

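/*
 * Registers the event callbacks above with the net_mgmt subsystem. Called
 * by the connection manager during initialization.
 */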
void conn_mgr_init_events_handler(void)
{
	net_mgmt_init_event_callback(&iface_events_cb,
				     conn_mgr_iface_events_handler,
				     CONN_MGR_IFACE_EVENTS_MASK);
	net_mgmt_add_event_callback(&iface_events_cb);

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		net_mgmt_init_event_callback(&ipv6_events_cb,
					     conn_mgr_ipv6_events_handler,
					     CONN_MGR_IPV6_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv6_events_cb);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		net_mgmt_init_event_callback(&ipv4_events_cb,
					     conn_mgr_ipv4_events_handler,
					     CONN_MGR_IPV4_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv4_events_cb);
	}
}