/** @file
 * @brief IPv6 MLD related functions
 */

/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_ipv6, CONFIG_NET_IPV6_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/mld.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/icmp.h>
#include "net_private.h"
#include "connection.h"
#include "icmpv6.h"
#include "udp_internal.h"
#include "tcp_internal.h"
#include "ipv6.h"
#include "nbr.h"
#include "6lo.h"
#include "route.h"
#include "net_stats.h"

/* Timeout for various buffer allocations in this file. */
#define PKT_WAIT_TIME K_MSEC(50)

#define MLDv2_MCAST_RECORD_LEN sizeof(struct net_icmpv6_mld_mcast_record)
#define IPV6_OPT_HDR_ROUTER_ALERT_LEN 8
#define MLDV2_REPORT_RESERVED_BYTES 2

#define MLDv2_LEN (MLDv2_MCAST_RECORD_LEN + sizeof(struct in6_addr))

/* Internal structure used for appending multicast routes to MLDv2 reports */
struct mcast_route_appending_info {
	int status;
	struct net_pkt *pkt;
	struct net_if *iface;
	size_t skipped;
};

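/* Append a single MLDv2 multicast address record (RFC 3810 ch 5.2) with the
 * given record type and no sources to the report being built in pkt.
 */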
static int mld_create(struct net_pkt *pkt,
		      const struct in6_addr *addr,
		      uint8_t record_type)
{
	NET_PKT_DATA_ACCESS_DEFINE(mld_access,
				   struct net_icmpv6_mld_mcast_record);
	struct net_icmpv6_mld_mcast_record *mld;

	mld = (struct net_icmpv6_mld_mcast_record *)
				net_pkt_get_data(pkt, &mld_access);
	if (!mld) {
		return -ENOBUFS;
	}

	mld->record_type = record_type;
	mld->aux_data_len = 0U;
	mld->num_sources = 0U;

	net_ipv6_addr_copy_raw(mld->mcast_address, (uint8_t *)addr);

	if (net_pkt_set_data(pkt, &mld_access)) {
		return -ENOBUFS;
	}

	return 0;
}

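/* Create the IPv6 header, hop-by-hop router alert option and MLDv2 report
 * header for a report carrying "count" multicast address records.
 */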
static int mld_create_packet(struct net_pkt *pkt, uint16_t count)
{
	struct in6_addr dst;

	/* Sent to all MLDv2-capable routers */
	net_ipv6_addr_create(&dst, 0xff02, 0, 0, 0, 0, 0, 0, 0x0016);

	net_pkt_set_ipv6_hop_limit(pkt, 1); /* RFC 3810 ch 7.4 */

	if (net_ipv6_create(pkt, net_if_ipv6_select_src_addr(
				    net_pkt_iface(pkt), &dst),
			    &dst)) {
		return -ENOBUFS;
	}

	/* Add hop-by-hop option and router alert option, RFC 3810 ch 5. */
	if (net_pkt_write_u8(pkt, IPPROTO_ICMPV6) ||
	    net_pkt_write_u8(pkt, 0)) {
		return -ENOBUFS;
	}

	/* IPv6 router alert option is described in RFC 2711.
	 * - 0x0502 RFC 2711 ch 2.1
	 * - MLD (value 0)
	 * - 2 bytes of padding
	 */
	if (net_pkt_write_be16(pkt, 0x0502) ||
	    net_pkt_write_be16(pkt, 0) ||
	    net_pkt_write_be16(pkt, 0)) {
		return -ENOBUFS;
	}

	net_pkt_set_ipv6_ext_len(pkt, IPV6_OPT_HDR_ROUTER_ALERT_LEN);

	/* ICMPv6 header + reserved space + count.
	 * The MLDv2 multicast address records will come right after.
	 */
	if (net_icmpv6_create(pkt, NET_ICMPV6_MLDv2, 0) ||
	    net_pkt_write_be16(pkt, 0) ||
	    net_pkt_write_be16(pkt, count)) {
		return -ENOBUFS;
	}

	net_pkt_set_ipv6_next_hdr(pkt, NET_IPV6_NEXTHDR_HBHO);

	return 0;
}

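/* Finalize the headers and pass the MLDv2 report to the lower layers,
 * updating ICMP/MLD statistics on both success and failure.
 */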
static int mld_send(struct net_pkt *pkt)
{
	int ret;

	net_pkt_cursor_init(pkt);
	net_ipv6_finalize(pkt, IPPROTO_ICMPV6);

	ret = net_send_data(pkt);
	if (ret < 0) {
		net_stats_update_icmp_drop(net_pkt_iface(pkt));
		net_stats_update_ipv6_mld_drop(net_pkt_iface(pkt));

		net_pkt_unref(pkt);

		return ret;
	}

	net_stats_update_icmp_sent(net_pkt_iface(pkt));
	net_stats_update_ipv6_mld_sent(net_pkt_iface(pkt));

	return 0;
}

#if defined(CONFIG_NET_MCAST_ROUTE_MLD_REPORTS)
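/* net_route_mcast_foreach() callback: count the multicast routes so that
 * enough space can be reserved in the report buffer.
 */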
static void count_mcast_routes(struct net_route_entry_mcast *entry, void *user_data)
{
	(*((int *)user_data))++;
}

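/* net_route_mcast_foreach() callback: append an EXCLUDE-mode record for a
 * multicast route with a full /128 prefix, unless the group address is
 * already part of the report because the interface has joined it.
 */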
static void append_mcast_routes(struct net_route_entry_mcast *entry, void *user_data)
{
	struct mcast_route_appending_info *info = (struct mcast_route_appending_info *)user_data;
	struct net_if_mcast_addr *mcasts = info->iface->config.ip.ipv6->mcast;

	if (info->status != 0 || entry->prefix_len != 128) {
		return;
	}

	for (int i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
		if (!mcasts[i].is_used || !mcasts[i].is_joined) {
			continue;
		}

		if (net_ipv6_addr_cmp(&entry->group, &mcasts[i].address.in6_addr)) {
			/* Address was already added to the report */
			info->skipped++;
			return;
		}
	}

	info->status = mld_create(info->pkt, &entry->group, NET_IPV6_MLDv2_MODE_IS_EXCLUDE);
}
#endif

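/* Build and send an MLDv2 report that carries a single multicast address
 * record for the given group, using the requested record type ("mode").
 */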
int net_ipv6_mld_send_single(struct net_if *iface, const struct in6_addr *addr, uint8_t mode)
{
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(iface, IPV6_OPT_HDR_ROUTER_ALERT_LEN +
					NET_ICMPV6_UNUSED_LEN +
					MLDv2_MCAST_RECORD_LEN +
					sizeof(struct in6_addr),
					AF_INET6, IPPROTO_ICMPV6,
					PKT_WAIT_TIME);
	if (!pkt) {
		return -ENOMEM;
	}

	if (mld_create_packet(pkt, 1) ||
	    mld_create(pkt, addr, mode)) {
		ret = -ENOBUFS;
		goto drop;
	}

	ret = mld_send(pkt);
	if (ret) {
		goto drop;
	}

	return 0;

drop:
	net_pkt_unref(pkt);

	return ret;
}

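/* Join an IPv6 multicast group on the given interface and, unless MLD is
 * disabled or the interface is offloaded, announce it with a
 * CHANGE_TO_EXCLUDE_MODE record (RFC 3810).
 *
 * A minimal usage sketch (the group address is an arbitrary example):
 *
 *	struct in6_addr group;
 *
 *	net_addr_pton(AF_INET6, "ff05::1", &group);
 *	(void)net_ipv6_mld_join(net_if_get_default(), &group);
 */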
int net_ipv6_mld_join(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_mcast_addr *maddr;
	int ret = 0;

	maddr = net_if_ipv6_maddr_lookup(addr, &iface);
	if (maddr && net_if_ipv6_maddr_is_joined(maddr)) {
		return -EALREADY;
	}

	if (!maddr) {
		maddr = net_if_ipv6_maddr_add(iface, addr);
		if (!maddr) {
			return -ENOMEM;
		}
	}

	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_MLD)) {
		return 0;
	}

	if (!net_if_is_up(iface)) {
		return -ENETDOWN;
	}

	if (net_if_is_offloaded(iface)) {
		goto out;
	}

	ret = net_ipv6_mld_send_single(iface, addr, NET_IPV6_MLDv2_CHANGE_TO_EXCLUDE_MODE);
	if (ret < 0) {
		return ret;
	}

out:
	net_if_ipv6_maddr_join(iface, maddr);

	net_if_mcast_monitor(iface, &maddr->address, true);

	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_MCAST_JOIN, iface,
					&maddr->address.in6_addr,
					sizeof(struct in6_addr));

	return ret;
}

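/* Leave an IPv6 multicast group: remove the address from the interface and,
 * unless MLD is disabled or the interface is offloaded, announce the change
 * with a CHANGE_TO_INCLUDE_MODE record. The counterpart of
 * net_ipv6_mld_join() above, e.g. (void)net_ipv6_mld_leave(iface, &group);
 */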
int net_ipv6_mld_leave(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_mcast_addr *maddr;
	int ret = 0;

	maddr = net_if_ipv6_maddr_lookup(addr, &iface);
	if (!maddr) {
		return -ENOENT;
	}

	if (!net_if_ipv6_maddr_rm(iface, addr)) {
		return -EINVAL;
	}

	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_MLD)) {
		return 0;
	}

	if (net_if_is_offloaded(iface)) {
		goto out;
	}

	ret = net_ipv6_mld_send_single(iface, addr, NET_IPV6_MLDv2_CHANGE_TO_INCLUDE_MODE);
	if (ret < 0) {
		return ret;
	}

out:
	net_if_mcast_monitor(iface, &maddr->address, false);

	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_MCAST_LEAVE, iface,
					&maddr->address.in6_addr,
					sizeof(struct in6_addr));

	return ret;
}

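/* Send a full MLDv2 report listing every joined multicast address on the
 * interface (and, optionally, the multicast routes) as MODE_IS_EXCLUDE
 * records. Used when answering a Multicast Listener Query.
 */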
static int send_mld_report(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	struct net_pkt *pkt;
	int i, count = 0;
	int ret;

	NET_ASSERT(ipv6);

	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
		if (!ipv6->mcast[i].is_used || !ipv6->mcast[i].is_joined) {
			continue;
		}

		count++;
	}

#if defined(CONFIG_NET_MCAST_ROUTE_MLD_REPORTS)
	/* Increase the number of slots by the number of multicast routes that
	 * can later be added to the report. Checking for duplicates is done
	 * while appending an entry.
	 */
	net_route_mcast_foreach(count_mcast_routes, NULL, (void *)&count);
#endif

	pkt = net_pkt_alloc_with_buffer(iface, IPV6_OPT_HDR_ROUTER_ALERT_LEN +
					NET_ICMPV6_UNUSED_LEN +
					count * MLDv2_MCAST_RECORD_LEN,
					AF_INET6, IPPROTO_ICMPV6,
					PKT_WAIT_TIME);
	if (!pkt) {
		return -ENOBUFS;
	}

	ret = mld_create_packet(pkt, count);
	if (ret < 0) {
		goto drop;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
		if (!ipv6->mcast[i].is_used || !ipv6->mcast[i].is_joined) {
			continue;
		}

		ret = mld_create(pkt, &ipv6->mcast[i].address.in6_addr,
				 NET_IPV6_MLDv2_MODE_IS_EXCLUDE);
		if (ret < 0) {
			goto drop;
		}
	}

#if defined(CONFIG_NET_MCAST_ROUTE_MLD_REPORTS)
	/* Append information about multicast routes as packets will be
	 * forwarded to these interfaces on reception.
	 */
	struct mcast_route_appending_info info;

	info.status = 0;
	info.pkt = pkt;
	info.iface = iface;
	info.skipped = 0;

	net_route_mcast_foreach(append_mcast_routes, NULL, &info);

	ret = info.status;
	if (ret < 0) {
		goto drop;
	}

	/* We may have skipped duplicated addresses that we reserved space for,
	 * so modify the number of records.
	 */
	if (info.skipped) {
		net_pkt_cursor_init(pkt);
		net_pkt_set_overwrite(pkt, true);

		net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) + net_pkt_ipv6_ext_len(pkt) +
			     sizeof(struct net_icmp_hdr) + MLDV2_REPORT_RESERVED_BYTES);

		count -= info.skipped;

		ret = net_pkt_write_be16(pkt, count);
		if (ret < 0) {
			goto drop;
		}

		net_pkt_remove_tail(pkt, info.skipped * sizeof(struct net_icmpv6_mld_mcast_record));
	}
#endif

	ret = mld_send(pkt);
	if (ret < 0) {
		goto drop;
	}

	return 0;

drop:
	net_pkt_unref(pkt);

	return ret;
}

#define dbg_addr(action, pkt_str, src, dst)				\
	do {								\
		NET_DBG("%s %s from %s to %s", action, pkt_str,		\
			net_sprint_ipv6_addr(src),			\
			net_sprint_ipv6_addr(dst));			\
	} while (0)

#define dbg_addr_recv(pkt_str, src, dst)	\
	dbg_addr("Received", pkt_str, src, dst)

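/* ICMPv6 handler for Multicast Listener Query messages: validate the query
 * (MLDv2 length, hop limit 1, ICMPv6 code 0) and answer a general query
 * with a full report for the interface.
 */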
static int handle_mld_query(struct net_icmp_ctx *ctx,
			    struct net_pkt *pkt,
			    struct net_icmp_ip_hdr *hdr,
			    struct net_icmp_hdr *icmp_hdr,
			    void *user_data)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(mld_access,
					      struct net_icmpv6_mld_query);
	struct net_ipv6_hdr *ip_hdr = hdr->ipv6;
	uint16_t length = net_pkt_get_len(pkt);
	struct net_icmpv6_mld_query *mld_query;
	uint16_t pkt_len;
	int ret = -EIO;

	if (net_pkt_remaining_data(pkt) < sizeof(struct net_icmpv6_mld_query)) {
		/* MLDv1 query, drop. */
		ret = 0;
		goto drop;
	}

	mld_query = (struct net_icmpv6_mld_query *)
				net_pkt_get_data(pkt, &mld_access);
	if (!mld_query) {
		NET_DBG("DROP: NULL MLD query");
		goto drop;
	}

	net_pkt_acknowledge_data(pkt, &mld_access);

	dbg_addr_recv("Multicast Listener Query", &ip_hdr->src, &ip_hdr->dst);

	net_stats_update_ipv6_mld_recv(net_pkt_iface(pkt));

	mld_query->num_sources = ntohs(mld_query->num_sources);

	pkt_len = sizeof(struct net_ipv6_hdr) + net_pkt_ipv6_ext_len(pkt) +
		  sizeof(struct net_icmp_hdr) +
		  sizeof(struct net_icmpv6_mld_query) +
		  sizeof(struct in6_addr) * mld_query->num_sources;

	if (length < pkt_len || pkt_len > NET_IPV6_MTU ||
	    ip_hdr->hop_limit != 1U || icmp_hdr->code != 0U) {
		goto drop;
	}

	/* Currently we only support an unspecified address query. */
	if (!net_ipv6_addr_cmp_raw(mld_query->mcast_address,
				   (uint8_t *)net_ipv6_unspecified_address())) {
		NET_DBG("DROP: only supporting unspecified address query");
		goto drop;
	}

	return send_mld_report(net_pkt_iface(pkt));

drop:
	net_stats_update_ipv6_mld_drop(net_pkt_iface(pkt));

	return ret;
}

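/* Register the Multicast Listener Query handler with the ICMPv6 layer. */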
void net_ipv6_mld_init(void)
{
	static struct net_icmp_ctx ctx;
	int ret;

	ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_MLD_QUERY, 0, handle_mld_query);
	if (ret < 0) {
		NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_MLD_QUERY),
			ret);
	}
}