/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/conn_mgr_connectivity.h>
#include "conn_mgr_private.h"

#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(7)
#endif

static K_THREAD_STACK_DEFINE(conn_mgr_mon_stack,
			     CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE);
static struct k_thread conn_mgr_mon_thread;

/* Internal state array tracking readiness, flags, and other state information for all available
 * ifaces. Note that indexing starts at 0, whereas Zephyr iface indices start at 1.
 * conn_mgr_mon_get_if_by_index and conn_mgr_get_index_for_if are used to go back and forth between
 * iface_states indices and Zephyr iface pointers.
 */
static uint16_t iface_states[CONN_MGR_IFACE_MAX];

/* Tracks the most recent total number of L4-ready ifaces (any, IPv4, IPv6) */
static uint16_t last_ready_count;
static uint16_t last_ready_count_ipv4;
static uint16_t last_ready_count_ipv6;

/* Tracks the last ifaces to cause a major state change (any, IPv4, IPv6) */
static struct net_if *last_blame;
static struct net_if *last_blame_ipv4;
static struct net_if *last_blame_ipv6;

/* Used to signal when modifications have been made that need to be responded to */
K_SEM_DEFINE(conn_mgr_mon_updated, 1, 1);
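/* Note: the semaphore starts at 1 so that the monitor thread's first
 * k_sem_take() returns immediately and the initial iface states are
 * evaluated at startup.
 */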

/* Used to protect conn_mgr_monitor state */
K_MUTEX_DEFINE(conn_mgr_mon_lock);

uint16_t *conn_mgr_if_state_internal(void)
{
	return iface_states;
}

/**
 * @brief Retrieves a pointer to an iface by the index that corresponds to it in iface_states
 *
 * @param index - The index in iface_states to find the corresponding iface for.
 * @return net_if* - The corresponding iface.
 */
static struct net_if *conn_mgr_mon_get_if_by_index(int index)
{
	return net_if_get_by_index(index + 1);
}

/**
 * @brief Gets the index in iface_states for the state corresponding to a provided iface.
 *
 * @param iface - iface to find the index of.
 * @return int - The index found.
 */
static int conn_mgr_get_index_for_if(struct net_if *iface)
{
	return net_if_get_by_iface(iface) - 1;
}
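
/* Example (illustrative): an iface that net_if_get_by_iface() reports at
 * index 3 is tracked at iface_states[2], and conn_mgr_mon_get_if_by_index(2)
 * recovers the same iface pointer.
 */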

/**
 * @brief Conveniently update iface readiness state
 *
 * @param idx - index (in iface_states) of the iface to mark ready or unready
 * @param ready - true if the iface should be considered ready, otherwise false
 * @param ready_ipv4 - true if the iface is ready with IPv4, otherwise false
 * @param ready_ipv6 - true if the iface is ready with IPv6, otherwise false
 */
static void conn_mgr_mon_set_ready(int idx, bool ready, bool ready_ipv4, bool ready_ipv6)
{
	/* Clear and then update the L4-readiness bits */
	iface_states[idx] &= ~CONN_MGR_IF_READY;
	iface_states[idx] &= ~CONN_MGR_IF_READY_IPV4;
	iface_states[idx] &= ~CONN_MGR_IF_READY_IPV6;

	if (ready) {
		iface_states[idx] |= CONN_MGR_IF_READY;
	}

	if (ready_ipv4) {
		iface_states[idx] |= CONN_MGR_IF_READY_IPV4;
	}

	if (ready_ipv6) {
		iface_states[idx] |= CONN_MGR_IF_READY_IPV6;
	}
}
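
/* For example, conn_mgr_mon_set_ready(idx, true, false, true) leaves
 * CONN_MGR_IF_READY and CONN_MGR_IF_READY_IPV6 set and CONN_MGR_IF_READY_IPV4
 * cleared for that iface.
 */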

static void conn_mgr_mon_handle_update(void)
{
	int idx;
	bool has_ip;
	bool has_ipv6;
	bool has_ipv4;
	bool is_l4_ready;
	bool is_ipv6_ready;
	bool is_ipv4_ready;
	bool is_oper_up;
	bool was_l4_ready;
	bool was_ipv6_ready;
	bool was_ipv4_ready;
	bool is_ignored;
	int ready_count = 0;
	int ready_count_ipv4 = 0;
	int ready_count_ipv6 = 0;
	struct net_if *blame = NULL;
	struct net_if *blame_ipv4 = NULL;
	struct net_if *blame_ipv6 = NULL;

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	for (idx = 0; idx < ARRAY_SIZE(iface_states); idx++) {
		if (iface_states[idx] == 0) {
			/* This interface is not used */
			continue;
		}

		/* Detect whether iface was previously considered ready */
		was_l4_ready = iface_states[idx] & CONN_MGR_IF_READY;
		was_ipv6_ready = iface_states[idx] & CONN_MGR_IF_READY_IPV6;
		was_ipv4_ready = iface_states[idx] & CONN_MGR_IF_READY_IPV4;

		/* Collect iface readiness requirements */
		has_ipv6 = iface_states[idx] & CONN_MGR_IF_IPV6_SET;
		has_ipv4 = iface_states[idx] & CONN_MGR_IF_IPV4_SET;
		has_ip = has_ipv6 || has_ipv4;
		is_oper_up = iface_states[idx] & CONN_MGR_IF_UP;
		is_ignored = iface_states[idx] & CONN_MGR_IF_IGNORED;

		/* Determine whether iface is currently considered ready */
		is_l4_ready = is_oper_up && has_ip && !is_ignored;
		is_ipv6_ready = is_oper_up && has_ipv6 && !is_ignored;
		is_ipv4_ready = is_oper_up && has_ipv4 && !is_ignored;

		/* Track ready iface count */
		if (is_l4_ready) {
			ready_count += 1;
		}
		if (is_ipv6_ready) {
			ready_count_ipv6 += 1;
		}
		if (is_ipv4_ready) {
			ready_count_ipv4 += 1;
		}

		/* If any states changed, track blame for possibly triggered events */
		if (was_l4_ready != is_l4_ready) {
			blame = conn_mgr_mon_get_if_by_index(idx);
		}
		if (was_ipv6_ready != is_ipv6_ready) {
			blame_ipv6 = conn_mgr_mon_get_if_by_index(idx);
		}
		if (was_ipv4_ready != is_ipv4_ready) {
			blame_ipv4 = conn_mgr_mon_get_if_by_index(idx);
		}

		/* Update readiness state flags with the (possibly) new values */
		conn_mgr_mon_set_ready(idx, is_l4_ready, is_ipv4_ready, is_ipv6_ready);
	}

	/* If the total number of ready ifaces changed, possibly send an event */
	if (ready_count != last_ready_count) {
		if (ready_count == 0) {
			/* We just lost connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, blame);
		} else if (last_ready_count == 0) {
			/* We just gained connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, blame);
		}
		last_ready_count = ready_count;
		last_blame = blame;
	}

	/* Same, but specifically for IPv4 */
	if (ready_count_ipv4 != last_ready_count_ipv4) {
		if (ready_count_ipv4 == 0) {
			/* We just lost IPv4 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV4_DISCONNECTED, blame_ipv4);
		} else if (last_ready_count_ipv4 == 0) {
			/* We just gained IPv4 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV4_CONNECTED, blame_ipv4);
		}
		last_ready_count_ipv4 = ready_count_ipv4;
		last_blame_ipv4 = blame_ipv4;
	}

	/* Same, but specifically for IPv6 */
	if (ready_count_ipv6 != last_ready_count_ipv6) {
		if (ready_count_ipv6 == 0) {
			/* We just lost IPv6 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV6_DISCONNECTED, blame_ipv6);
		} else if (last_ready_count_ipv6 == 0) {
			/* We just gained IPv6 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV6_CONNECTED, blame_ipv6);
		}
		last_ready_count_ipv6 = ready_count_ipv6;
		last_blame_ipv6 = blame_ipv6;
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
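
/* The notifications above are edge-triggered: NET_EVENT_L4_CONNECTED fires only
 * when the ready count rises from zero, and NET_EVENT_L4_DISCONNECTED only when
 * it drops back to zero. A minimal application-side sketch of subscribing to
 * them via net_mgmt (the l4_cb and l4_handler names are hypothetical, not part
 * of this file):
 *
 *	static struct net_mgmt_event_callback l4_cb;
 *
 *	static void l4_handler(struct net_mgmt_event_callback *cb,
 *			       uint32_t mgmt_event, struct net_if *iface)
 *	{
 *		if (mgmt_event == NET_EVENT_L4_CONNECTED) {
 *			// iface is the "blame" iface that caused the transition
 *		}
 *	}
 *
 *	net_mgmt_init_event_callback(&l4_cb, l4_handler,
 *				     NET_EVENT_L4_CONNECTED | NET_EVENT_L4_DISCONNECTED);
 *	net_mgmt_add_event_callback(&l4_cb);
 */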

/**
 * @brief Initialize the internal state flags for the given iface using its current status
 *
 * @param iface - iface to initialize from.
 */
static void conn_mgr_mon_initial_state(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	if (net_if_is_up(iface)) {
		NET_DBG("Iface %p UP", iface);
		iface_states[idx] |= CONN_MGR_IF_UP;
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV6)) {
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			NET_DBG("IPv6 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV4)) {
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			NET_DBG("IPv4 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		}
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}

static void conn_mgr_mon_init_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	conn_mgr_mon_initial_state(iface);
}

static void conn_mgr_mon_thread_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	conn_mgr_conn_init();

	conn_mgr_init_events_handler();

	net_if_foreach(conn_mgr_mon_init_cb, NULL);

	k_mutex_unlock(&conn_mgr_mon_lock);

	NET_DBG("Connection Manager started");

	while (true) {
		/* Wait for changes */
		k_sem_take(&conn_mgr_mon_updated, K_FOREVER);

		/* Respond to changes */
		conn_mgr_mon_handle_update();
	}
}
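
/* Anything that mutates iface_states wakes the monitor thread by giving
 * conn_mgr_mon_updated; see conn_mgr_ignore_iface() and conn_mgr_watch_iface()
 * below, as well as the handlers registered by conn_mgr_init_events_handler().
 */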

void conn_mgr_mon_resend_status(void)
{
	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	if (last_ready_count == 0) {
		net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_blame);
	} else {
		net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_blame);
	}

	if (last_ready_count_ipv6 == 0) {
		net_mgmt_event_notify(NET_EVENT_L4_IPV6_DISCONNECTED, last_blame_ipv6);
	} else {
		net_mgmt_event_notify(NET_EVENT_L4_IPV6_CONNECTED, last_blame_ipv6);
	}

	if (last_ready_count_ipv4 == 0) {
		net_mgmt_event_notify(NET_EVENT_L4_IPV4_DISCONNECTED, last_blame_ipv4);
	} else {
		net_mgmt_event_notify(NET_EVENT_L4_IPV4_CONNECTED, last_blame_ipv4);
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
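
/* Example (hypothetical application code): replay the current connectivity
 * state to a callback registered after the monitor already started, assuming
 * l4_cb and l4_handler are set up as sketched above:
 *
 *	net_mgmt_init_event_callback(&l4_cb, l4_handler, NET_EVENT_L4_CONNECTED);
 *	net_mgmt_add_event_callback(&l4_cb);
 *	conn_mgr_mon_resend_status();
 */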

void conn_mgr_ignore_iface(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	if (!(iface_states[idx] & CONN_MGR_IF_IGNORED)) {
		/* Set ignored flag and mark state as changed */
		iface_states[idx] |= CONN_MGR_IF_IGNORED;
		k_sem_give(&conn_mgr_mon_updated);
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}

void conn_mgr_watch_iface(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	if (iface_states[idx] & CONN_MGR_IF_IGNORED) {
		/* Clear ignored flag and mark state as changed */
		iface_states[idx] &= ~CONN_MGR_IF_IGNORED;
		k_sem_give(&conn_mgr_mon_updated);
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
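
/* Example (hypothetical application code): temporarily exclude the default
 * iface from connectivity tracking, then re-include it:
 *
 *	conn_mgr_ignore_iface(net_if_get_default());
 *	...
 *	conn_mgr_watch_iface(net_if_get_default());
 */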

bool conn_mgr_is_iface_ignored(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	bool ret = false;

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	ret = iface_states[idx] & CONN_MGR_IF_IGNORED;

	k_mutex_unlock(&conn_mgr_mon_lock);

	return ret;
}

/**
 * @brief Check whether a provided iface uses the provided L2.
 *
 * @param iface - iface to check.
 * @param l2 - L2 to check. NULL will match offloaded ifaces.
 * @retval true if the iface uses the provided L2.
 * @retval false otherwise.
 */
static bool iface_uses_l2(struct net_if *iface, const struct net_l2 *l2)
{
	return (!l2 && net_if_offload(iface)) ||
	       (net_if_l2(iface) == l2);
}

void conn_mgr_ignore_l2(const struct net_l2 *l2)
{
	/* conn_mgr_ignore_iface already locks the mutex, but we lock it here too
	 * so that all matching ifaces are updated simultaneously.
	 */
	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (iface_uses_l2(iface, l2)) {
			conn_mgr_ignore_iface(iface);
		}
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}

void conn_mgr_watch_l2(const struct net_l2 *l2)
{
	/* conn_mgr_watch_iface already locks the mutex, but we lock it here too
	 * so that all matching ifaces are updated simultaneously.
	 */
	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (iface_uses_l2(iface, l2)) {
			conn_mgr_watch_iface(iface);
		}
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
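
/* Example (hypothetical application code): ignore all Ethernet ifaces at once,
 * assuming CONFIG_NET_L2_ETHERNET is enabled. NET_L2_GET_NAME() resolves the
 * symbol of a declared L2:
 *
 *	conn_mgr_ignore_l2(&NET_L2_GET_NAME(ETHERNET));
 */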

static int conn_mgr_mon_init(void)
{
	k_thread_create(&conn_mgr_mon_thread, conn_mgr_mon_stack,
			CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE,
			conn_mgr_mon_thread_fn,
			NULL, NULL, NULL, THREAD_PRIORITY, 0, K_NO_WAIT);
	k_thread_name_set(&conn_mgr_mon_thread, "conn_mgr_monitor");

	return 0;
}

uint16_t conn_mgr_if_state(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);
	uint16_t state = CONN_MGR_IF_STATE_INVALID;

	/* Guard against ifaces unknown to conn_mgr: idx is negative when
	 * net_if_get_by_iface() does not recognize the iface.
	 */
	if (idx >= 0 && idx < CONN_MGR_IFACE_MAX) {
		k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
		state = iface_states[idx];
		k_mutex_unlock(&conn_mgr_mon_lock);
	}

	return state;
}
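
/* Example (illustrative): check IPv4 readiness of an iface using the flag bits
 * from conn_mgr_private.h:
 *
 *	uint16_t state = conn_mgr_if_state(iface);
 *
 *	if (state != CONN_MGR_IF_STATE_INVALID &&
 *	    (state & CONN_MGR_IF_READY_IPV4)) {
 *		...
 *	}
 */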

SYS_INIT(conn_mgr_mon_init, APPLICATION, CONFIG_NET_CONNECTION_MANAGER_MONITOR_PRIORITY);