// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

39 | static const struct rhashtable_params br_mdb_rht_params = { |
40 | .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode), |
41 | .key_offset = offsetof(struct net_bridge_mdb_entry, addr), |
42 | .key_len = sizeof(struct br_ip), |
43 | .automatic_shrinking = true, |
44 | }; |
45 | |
46 | static const struct rhashtable_params br_sg_port_rht_params = { |
47 | .head_offset = offsetof(struct net_bridge_port_group, rhnode), |
48 | .key_offset = offsetof(struct net_bridge_port_group, key), |
49 | .key_len = sizeof(struct net_bridge_port_group_sg_key), |
50 | .automatic_shrinking = true, |
51 | }; |
52 | |
53 | static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, |
54 | struct bridge_mcast_own_query *query); |
55 | static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx, |
56 | struct net_bridge_mcast_port *pmctx); |
57 | static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx, |
58 | struct net_bridge_mcast_port *pmctx, |
59 | __be32 group, |
60 | __u16 vid, |
61 | const unsigned char *src); |
62 | static void br_multicast_port_group_rexmit(struct timer_list *t); |
63 | |
64 | static void |
65 | br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted); |
66 | static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx, |
67 | struct net_bridge_mcast_port *pmctx); |
68 | #if IS_ENABLED(CONFIG_IPV6) |
69 | static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx, |
70 | struct net_bridge_mcast_port *pmctx, |
71 | const struct in6_addr *group, |
72 | __u16 vid, const unsigned char *src); |
73 | #endif |
74 | static struct net_bridge_port_group * |
75 | __br_multicast_add_group(struct net_bridge_mcast *brmctx, |
76 | struct net_bridge_mcast_port *pmctx, |
77 | struct br_ip *group, |
78 | const unsigned char *src, |
79 | u8 filter_mode, |
80 | bool igmpv2_mldv1, |
81 | bool blocked); |
82 | static void br_multicast_find_del_pg(struct net_bridge *br, |
83 | struct net_bridge_port_group *pg); |
84 | static void __br_multicast_stop(struct net_bridge_mcast *brmctx); |
85 | |
86 | static int br_mc_disabled_update(struct net_device *dev, bool value, |
87 | struct netlink_ext_ack *extack); |
88 | |
89 | static struct net_bridge_port_group * |
90 | br_sg_port_find(struct net_bridge *br, |
91 | struct net_bridge_port_group_sg_key *sg_p) |
92 | { |
93 | lockdep_assert_held_once(&br->multicast_lock); |
94 | |
95 | return rhashtable_lookup_fast(ht: &br->sg_port_tbl, key: sg_p, |
96 | params: br_sg_port_rht_params); |
97 | } |
98 | |
99 | static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br, |
100 | struct br_ip *dst) |
101 | { |
102 | return rhashtable_lookup(ht: &br->mdb_hash_tbl, key: dst, params: br_mdb_rht_params); |
103 | } |
104 | |
105 | struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br, |
106 | struct br_ip *dst) |
107 | { |
108 | struct net_bridge_mdb_entry *ent; |
109 | |
110 | lockdep_assert_held_once(&br->multicast_lock); |
111 | |
112 | rcu_read_lock(); |
113 | ent = rhashtable_lookup(ht: &br->mdb_hash_tbl, key: dst, params: br_mdb_rht_params); |
114 | rcu_read_unlock(); |
115 | |
116 | return ent; |
117 | } |
118 | |
119 | static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br, |
120 | __be32 dst, __u16 vid) |
121 | { |
122 | struct br_ip br_dst; |
123 | |
124 | memset(&br_dst, 0, sizeof(br_dst)); |
125 | br_dst.dst.ip4 = dst; |
126 | br_dst.proto = htons(ETH_P_IP); |
127 | br_dst.vid = vid; |
128 | |
129 | return br_mdb_ip_get(br, dst: &br_dst); |
130 | } |
131 | |
132 | #if IS_ENABLED(CONFIG_IPV6) |
133 | static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br, |
134 | const struct in6_addr *dst, |
135 | __u16 vid) |
136 | { |
137 | struct br_ip br_dst; |
138 | |
139 | memset(&br_dst, 0, sizeof(br_dst)); |
140 | br_dst.dst.ip6 = *dst; |
141 | br_dst.proto = htons(ETH_P_IPV6); |
142 | br_dst.vid = vid; |
143 | |
144 | return br_mdb_ip_get(br, dst: &br_dst); |
145 | } |
146 | #endif |
147 | |
148 | struct net_bridge_mdb_entry * |
149 | br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb, |
150 | u16 vid) |
151 | { |
152 | struct net_bridge *br = brmctx->br; |
153 | struct br_ip ip; |
154 | |
155 | if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) || |
156 | br_multicast_ctx_vlan_global_disabled(brmctx)) |
157 | return NULL; |
158 | |
159 | if (BR_INPUT_SKB_CB(skb)->igmp) |
160 | return NULL; |
161 | |
162 | memset(&ip, 0, sizeof(ip)); |
163 | ip.proto = skb->protocol; |
164 | ip.vid = vid; |
165 | |
166 | switch (skb->protocol) { |
167 | case htons(ETH_P_IP): |
168 | ip.dst.ip4 = ip_hdr(skb)->daddr; |
169 | if (brmctx->multicast_igmp_version == 3) { |
170 | struct net_bridge_mdb_entry *mdb; |
171 | |
172 | ip.src.ip4 = ip_hdr(skb)->saddr; |
173 | mdb = br_mdb_ip_get_rcu(br, dst: &ip); |
174 | if (mdb) |
175 | return mdb; |
176 | ip.src.ip4 = 0; |
177 | } |
178 | break; |
179 | #if IS_ENABLED(CONFIG_IPV6) |
180 | case htons(ETH_P_IPV6): |
181 | ip.dst.ip6 = ipv6_hdr(skb)->daddr; |
182 | if (brmctx->multicast_mld_version == 2) { |
183 | struct net_bridge_mdb_entry *mdb; |
184 | |
185 | ip.src.ip6 = ipv6_hdr(skb)->saddr; |
186 | mdb = br_mdb_ip_get_rcu(br, dst: &ip); |
187 | if (mdb) |
188 | return mdb; |
189 | memset(&ip.src.ip6, 0, sizeof(ip.src.ip6)); |
190 | } |
191 | break; |
192 | #endif |
193 | default: |
194 | ip.proto = 0; |
195 | ether_addr_copy(dst: ip.dst.mac_addr, src: eth_hdr(skb)->h_dest); |
196 | } |
197 | |
198 | return br_mdb_ip_get_rcu(br, dst: &ip); |
199 | } |
200 | |
201 | /* IMPORTANT: this function must be used only when the contexts cannot be |
202 | * passed down (e.g. timer) and must be used for read-only purposes because |
203 | * the vlan snooping option can change, so it can return any context |
204 | * (non-vlan or vlan). Its initial intended purpose is to read timer values |
205 | * from the *current* context based on the option. At worst that could lead |
206 | * to inconsistent timers when the contexts are changed, i.e. src timer |
207 | * which needs to re-arm with a specific delay taken from the old context |
208 | */ |
209 | static struct net_bridge_mcast_port * |
210 | br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg) |
211 | { |
212 | struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx; |
213 | struct net_bridge_vlan *vlan; |
214 | |
215 | lockdep_assert_held_once(&pg->key.port->br->multicast_lock); |
216 | |
217 | /* if vlan snooping is disabled use the port's multicast context */ |
218 | if (!pg->key.addr.vid || |
219 | !br_opt_get(br: pg->key.port->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) |
220 | goto out; |
221 | |
222 | /* locking is tricky here, due to different rules for multicast and |
223 | * vlans we need to take rcu to find the vlan and make sure it has |
224 | * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under |
225 | * multicast_lock which must be already held here, so the vlan's pmctx |
226 | * can safely be used on return |
227 | */ |
228 | rcu_read_lock(); |
229 | vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: pg->key.port), vid: pg->key.addr.vid); |
230 | if (vlan && !br_multicast_port_ctx_vlan_disabled(pmctx: &vlan->port_mcast_ctx)) |
231 | pmctx = &vlan->port_mcast_ctx; |
232 | else |
233 | pmctx = NULL; |
234 | rcu_read_unlock(); |
235 | out: |
236 | return pmctx; |
237 | } |
238 | |
239 | static struct net_bridge_mcast_port * |
240 | br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid) |
241 | { |
242 | struct net_bridge_mcast_port *pmctx = NULL; |
243 | struct net_bridge_vlan *vlan; |
244 | |
245 | lockdep_assert_held_once(&port->br->multicast_lock); |
246 | |
247 | if (!br_opt_get(br: port->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) |
248 | return NULL; |
249 | |
250 | /* Take RCU to access the vlan. */ |
251 | rcu_read_lock(); |
252 | |
253 | vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: port), vid); |
254 | if (vlan && !br_multicast_port_ctx_vlan_disabled(pmctx: &vlan->port_mcast_ctx)) |
255 | pmctx = &vlan->port_mcast_ctx; |
256 | |
257 | rcu_read_unlock(); |
258 | |
259 | return pmctx; |
260 | } |
261 | |
262 | /* when snooping we need to check if the contexts should be used |
263 | * in the following order: |
264 | * - if pmctx is non-NULL (port), check if it should be used |
265 | * - if pmctx is NULL (bridge), check if brmctx should be used |
266 | */ |
267 | static bool |
268 | br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx, |
269 | const struct net_bridge_mcast_port *pmctx) |
270 | { |
271 | if (!netif_running(dev: brmctx->br->dev)) |
272 | return false; |
273 | |
274 | if (pmctx) |
275 | return !br_multicast_port_ctx_state_disabled(pmctx); |
276 | else |
277 | return !br_multicast_ctx_vlan_disabled(brmctx); |
278 | } |
279 | |
280 | static bool br_port_group_equal(struct net_bridge_port_group *p, |
281 | struct net_bridge_port *port, |
282 | const unsigned char *src) |
283 | { |
284 | if (p->key.port != port) |
285 | return false; |
286 | |
287 | if (!(port->flags & BR_MULTICAST_TO_UNICAST)) |
288 | return true; |
289 | |
290 | return ether_addr_equal(addr1: src, addr2: p->eth_addr); |
291 | } |
292 | |
293 | static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx, |
294 | struct net_bridge_port_group *pg, |
295 | struct br_ip *sg_ip) |
296 | { |
297 | struct net_bridge_port_group_sg_key sg_key; |
298 | struct net_bridge_port_group *src_pg; |
299 | struct net_bridge_mcast *brmctx; |
300 | |
301 | memset(&sg_key, 0, sizeof(sg_key)); |
302 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
303 | sg_key.port = pg->key.port; |
304 | sg_key.addr = *sg_ip; |
305 | if (br_sg_port_find(br: brmctx->br, sg_p: &sg_key)) |
306 | return; |
307 | |
308 | src_pg = __br_multicast_add_group(brmctx, pmctx, |
309 | group: sg_ip, src: pg->eth_addr, |
310 | MCAST_INCLUDE, igmpv2_mldv1: false, blocked: false); |
311 | if (IS_ERR_OR_NULL(ptr: src_pg) || |
312 | src_pg->rt_protocol != RTPROT_KERNEL) |
313 | return; |
314 | |
315 | src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; |
316 | } |
317 | |
318 | static void __fwd_del_star_excl(struct net_bridge_port_group *pg, |
319 | struct br_ip *sg_ip) |
320 | { |
321 | struct net_bridge_port_group_sg_key sg_key; |
322 | struct net_bridge *br = pg->key.port->br; |
323 | struct net_bridge_port_group *src_pg; |
324 | |
325 | memset(&sg_key, 0, sizeof(sg_key)); |
326 | sg_key.port = pg->key.port; |
327 | sg_key.addr = *sg_ip; |
328 | src_pg = br_sg_port_find(br, sg_p: &sg_key); |
329 | if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) || |
330 | src_pg->rt_protocol != RTPROT_KERNEL) |
331 | return; |
332 | |
333 | br_multicast_find_del_pg(br, pg: src_pg); |
334 | } |
335 | |
336 | /* When a port group transitions to (or is added as) EXCLUDE we need to add it |
337 | * to all other ports' S,G entries which are not blocked by the current group |
338 | * for proper replication, the assumption is that any S,G blocked entries |
339 | * are already added so the S,G,port lookup should skip them. |
340 | * When a port group transitions from EXCLUDE -> INCLUDE mode or is being |
341 | * deleted we need to remove it from all ports' S,G entries where it was |
342 | * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL). |
343 | */ |
344 | void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg, |
345 | u8 filter_mode) |
346 | { |
347 | struct net_bridge *br = pg->key.port->br; |
348 | struct net_bridge_port_group *pg_lst; |
349 | struct net_bridge_mcast_port *pmctx; |
350 | struct net_bridge_mdb_entry *mp; |
351 | struct br_ip sg_ip; |
352 | |
353 | if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr))) |
354 | return; |
355 | |
356 | mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
357 | if (!mp) |
358 | return; |
359 | pmctx = br_multicast_pg_to_port_ctx(pg); |
360 | if (!pmctx) |
361 | return; |
362 | |
363 | memset(&sg_ip, 0, sizeof(sg_ip)); |
364 | sg_ip = pg->key.addr; |
365 | |
366 | for (pg_lst = mlock_dereference(mp->ports, br); |
367 | pg_lst; |
368 | pg_lst = mlock_dereference(pg_lst->next, br)) { |
369 | struct net_bridge_group_src *src_ent; |
370 | |
371 | if (pg_lst == pg) |
372 | continue; |
373 | hlist_for_each_entry(src_ent, &pg_lst->src_list, node) { |
374 | if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) |
375 | continue; |
376 | sg_ip.src = src_ent->addr.src; |
377 | switch (filter_mode) { |
378 | case MCAST_INCLUDE: |
379 | __fwd_del_star_excl(pg, sg_ip: &sg_ip); |
380 | break; |
381 | case MCAST_EXCLUDE: |
382 | __fwd_add_star_excl(pmctx, pg, sg_ip: &sg_ip); |
383 | break; |
384 | } |
385 | } |
386 | } |
387 | } |
388 | |
389 | /* called when adding a new S,G with host_joined == false by default */ |
390 | static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp, |
391 | struct net_bridge_port_group *sg) |
392 | { |
393 | struct net_bridge_mdb_entry *sg_mp; |
394 | |
395 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
396 | return; |
397 | if (!star_mp->host_joined) |
398 | return; |
399 | |
400 | sg_mp = br_mdb_ip_get(br: star_mp->br, dst: &sg->key.addr); |
401 | if (!sg_mp) |
402 | return; |
403 | sg_mp->host_joined = true; |
404 | } |
405 | |
406 | /* set the host_joined state of all of *,G's S,G entries */ |
407 | static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp) |
408 | { |
409 | struct net_bridge *br = star_mp->br; |
410 | struct net_bridge_mdb_entry *sg_mp; |
411 | struct net_bridge_port_group *pg; |
412 | struct br_ip sg_ip; |
413 | |
414 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
415 | return; |
416 | |
417 | memset(&sg_ip, 0, sizeof(sg_ip)); |
418 | sg_ip = star_mp->addr; |
419 | for (pg = mlock_dereference(star_mp->ports, br); |
420 | pg; |
421 | pg = mlock_dereference(pg->next, br)) { |
422 | struct net_bridge_group_src *src_ent; |
423 | |
424 | hlist_for_each_entry(src_ent, &pg->src_list, node) { |
425 | if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) |
426 | continue; |
427 | sg_ip.src = src_ent->addr.src; |
428 | sg_mp = br_mdb_ip_get(br, dst: &sg_ip); |
429 | if (!sg_mp) |
430 | continue; |
431 | sg_mp->host_joined = star_mp->host_joined; |
432 | } |
433 | } |
434 | } |
435 | |
436 | static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp) |
437 | { |
438 | struct net_bridge_port_group __rcu **pp; |
439 | struct net_bridge_port_group *p; |
440 | |
441 | /* *,G exclude ports are only added to S,G entries */ |
442 | if (WARN_ON(br_multicast_is_star_g(&sgmp->addr))) |
443 | return; |
444 | |
445 | /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports |
446 | * we should ignore perm entries since they're managed by user-space |
447 | */ |
448 | for (pp = &sgmp->ports; |
449 | (p = mlock_dereference(*pp, sgmp->br)) != NULL; |
450 | pp = &p->next) |
451 | if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL | |
452 | MDB_PG_FLAGS_PERMANENT))) |
453 | return; |
454 | |
455 | /* currently the host can only have joined the *,G which means |
456 | * we treat it as EXCLUDE {}, so for an S,G it's considered a |
457 | * STAR_EXCLUDE entry and we can safely leave it |
458 | */ |
459 | sgmp->host_joined = false; |
460 | |
461 | for (pp = &sgmp->ports; |
462 | (p = mlock_dereference(*pp, sgmp->br)) != NULL;) { |
463 | if (!(p->flags & MDB_PG_FLAGS_PERMANENT)) |
464 | br_multicast_del_pg(mp: sgmp, pg: p, pp); |
465 | else |
466 | pp = &p->next; |
467 | } |
468 | } |
469 | |
470 | void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp, |
471 | struct net_bridge_port_group *sg) |
472 | { |
473 | struct net_bridge_port_group_sg_key sg_key; |
474 | struct net_bridge *br = star_mp->br; |
475 | struct net_bridge_mcast_port *pmctx; |
476 | struct net_bridge_port_group *pg; |
477 | struct net_bridge_mcast *brmctx; |
478 | |
479 | if (WARN_ON(br_multicast_is_star_g(&sg->key.addr))) |
480 | return; |
481 | if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) |
482 | return; |
483 | |
484 | br_multicast_sg_host_state(star_mp, sg); |
485 | memset(&sg_key, 0, sizeof(sg_key)); |
486 | sg_key.addr = sg->key.addr; |
487 | /* we need to add all exclude ports to the S,G */ |
488 | for (pg = mlock_dereference(star_mp->ports, br); |
489 | pg; |
490 | pg = mlock_dereference(pg->next, br)) { |
491 | struct net_bridge_port_group *src_pg; |
492 | |
493 | if (pg == sg || pg->filter_mode == MCAST_INCLUDE) |
494 | continue; |
495 | |
496 | sg_key.port = pg->key.port; |
497 | if (br_sg_port_find(br, sg_p: &sg_key)) |
498 | continue; |
499 | |
500 | pmctx = br_multicast_pg_to_port_ctx(pg); |
501 | if (!pmctx) |
502 | continue; |
503 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
504 | |
505 | src_pg = __br_multicast_add_group(brmctx, pmctx, |
506 | group: &sg->key.addr, |
507 | src: sg->eth_addr, |
508 | MCAST_INCLUDE, igmpv2_mldv1: false, blocked: false); |
509 | if (IS_ERR_OR_NULL(ptr: src_pg) || |
510 | src_pg->rt_protocol != RTPROT_KERNEL) |
511 | continue; |
512 | src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; |
513 | } |
514 | } |
515 | |
516 | static void br_multicast_fwd_src_add(struct net_bridge_group_src *src) |
517 | { |
518 | struct net_bridge_mdb_entry *star_mp; |
519 | struct net_bridge_mcast_port *pmctx; |
520 | struct net_bridge_port_group *sg; |
521 | struct net_bridge_mcast *brmctx; |
522 | struct br_ip sg_ip; |
523 | |
524 | if (src->flags & BR_SGRP_F_INSTALLED) |
525 | return; |
526 | |
527 | memset(&sg_ip, 0, sizeof(sg_ip)); |
528 | pmctx = br_multicast_pg_to_port_ctx(pg: src->pg); |
529 | if (!pmctx) |
530 | return; |
531 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
532 | sg_ip = src->pg->key.addr; |
533 | sg_ip.src = src->addr.src; |
534 | |
535 | sg = __br_multicast_add_group(brmctx, pmctx, group: &sg_ip, |
536 | src: src->pg->eth_addr, MCAST_INCLUDE, igmpv2_mldv1: false, |
537 | blocked: !timer_pending(timer: &src->timer)); |
538 | if (IS_ERR_OR_NULL(ptr: sg)) |
539 | return; |
540 | src->flags |= BR_SGRP_F_INSTALLED; |
541 | sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL; |
542 | |
543 | /* if it was added by user-space as perm we can skip next steps */ |
544 | if (sg->rt_protocol != RTPROT_KERNEL && |
545 | (sg->flags & MDB_PG_FLAGS_PERMANENT)) |
546 | return; |
547 | |
548 | /* the kernel is now responsible for removing this S,G */ |
549 | del_timer(timer: &sg->timer); |
550 | star_mp = br_mdb_ip_get(br: src->br, dst: &src->pg->key.addr); |
551 | if (!star_mp) |
552 | return; |
553 | |
554 | br_multicast_sg_add_exclude_ports(star_mp, sg); |
555 | } |
556 | |
557 | static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src, |
558 | bool fastleave) |
559 | { |
560 | struct net_bridge_port_group *p, *pg = src->pg; |
561 | struct net_bridge_port_group __rcu **pp; |
562 | struct net_bridge_mdb_entry *mp; |
563 | struct br_ip sg_ip; |
564 | |
565 | memset(&sg_ip, 0, sizeof(sg_ip)); |
566 | sg_ip = pg->key.addr; |
567 | sg_ip.src = src->addr.src; |
568 | |
569 | mp = br_mdb_ip_get(br: src->br, dst: &sg_ip); |
570 | if (!mp) |
571 | return; |
572 | |
573 | for (pp = &mp->ports; |
574 | (p = mlock_dereference(*pp, src->br)) != NULL; |
575 | pp = &p->next) { |
576 | if (!br_port_group_equal(p, port: pg->key.port, src: pg->eth_addr)) |
577 | continue; |
578 | |
579 | if (p->rt_protocol != RTPROT_KERNEL && |
580 | (p->flags & MDB_PG_FLAGS_PERMANENT) && |
581 | !(src->flags & BR_SGRP_F_USER_ADDED)) |
582 | break; |
583 | |
584 | if (fastleave) |
585 | p->flags |= MDB_PG_FLAGS_FAST_LEAVE; |
586 | br_multicast_del_pg(mp, pg: p, pp); |
587 | break; |
588 | } |
589 | src->flags &= ~BR_SGRP_F_INSTALLED; |
590 | } |
591 | |
592 | /* install S,G and based on src's timer enable or disable forwarding */ |
593 | static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src) |
594 | { |
595 | struct net_bridge_port_group_sg_key sg_key; |
596 | struct net_bridge_port_group *sg; |
597 | u8 old_flags; |
598 | |
599 | br_multicast_fwd_src_add(src); |
600 | |
601 | memset(&sg_key, 0, sizeof(sg_key)); |
602 | sg_key.addr = src->pg->key.addr; |
603 | sg_key.addr.src = src->addr.src; |
604 | sg_key.port = src->pg->key.port; |
605 | |
606 | sg = br_sg_port_find(br: src->br, sg_p: &sg_key); |
607 | if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT)) |
608 | return; |
609 | |
610 | old_flags = sg->flags; |
611 | if (timer_pending(timer: &src->timer)) |
612 | sg->flags &= ~MDB_PG_FLAGS_BLOCKED; |
613 | else |
614 | sg->flags |= MDB_PG_FLAGS_BLOCKED; |
615 | |
616 | if (old_flags != sg->flags) { |
617 | struct net_bridge_mdb_entry *sg_mp; |
618 | |
619 | sg_mp = br_mdb_ip_get(br: src->br, dst: &sg_key.addr); |
620 | if (!sg_mp) |
621 | return; |
622 | br_mdb_notify(dev: src->br->dev, mp: sg_mp, pg: sg, RTM_NEWMDB); |
623 | } |
624 | } |
625 | |
626 | static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc) |
627 | { |
628 | struct net_bridge_mdb_entry *mp; |
629 | |
630 | mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc); |
631 | WARN_ON(!hlist_unhashed(&mp->mdb_node)); |
632 | WARN_ON(mp->ports); |
633 | |
634 | timer_shutdown_sync(timer: &mp->timer); |
635 | kfree_rcu(mp, rcu); |
636 | } |
637 | |
638 | static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) |
639 | { |
640 | struct net_bridge *br = mp->br; |
641 | |
642 | rhashtable_remove_fast(ht: &br->mdb_hash_tbl, obj: &mp->rhnode, |
643 | params: br_mdb_rht_params); |
644 | hlist_del_init_rcu(n: &mp->mdb_node); |
645 | hlist_add_head(n: &mp->mcast_gc.gc_node, h: &br->mcast_gc_list); |
646 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
647 | } |
648 | |
649 | static void br_multicast_group_expired(struct timer_list *t) |
650 | { |
651 | struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); |
652 | struct net_bridge *br = mp->br; |
653 | |
654 | spin_lock(lock: &br->multicast_lock); |
655 | if (hlist_unhashed(h: &mp->mdb_node) || !netif_running(dev: br->dev) || |
656 | timer_pending(timer: &mp->timer)) |
657 | goto out; |
658 | |
659 | br_multicast_host_leave(mp, notify: true); |
660 | |
661 | if (mp->ports) |
662 | goto out; |
663 | br_multicast_del_mdb_entry(mp); |
664 | out: |
665 | spin_unlock(lock: &br->multicast_lock); |
666 | } |
667 | |
668 | static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc) |
669 | { |
670 | struct net_bridge_group_src *src; |
671 | |
672 | src = container_of(gc, struct net_bridge_group_src, mcast_gc); |
673 | WARN_ON(!hlist_unhashed(&src->node)); |
674 | |
675 | timer_shutdown_sync(timer: &src->timer); |
676 | kfree_rcu(src, rcu); |
677 | } |
678 | |
679 | void __br_multicast_del_group_src(struct net_bridge_group_src *src) |
680 | { |
681 | struct net_bridge *br = src->pg->key.port->br; |
682 | |
683 | hlist_del_init_rcu(n: &src->node); |
684 | src->pg->src_ents--; |
685 | hlist_add_head(n: &src->mcast_gc.gc_node, h: &br->mcast_gc_list); |
686 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
687 | } |
688 | |
689 | void br_multicast_del_group_src(struct net_bridge_group_src *src, |
690 | bool fastleave) |
691 | { |
692 | br_multicast_fwd_src_remove(src, fastleave); |
693 | __br_multicast_del_group_src(src); |
694 | } |
695 | |
696 | static int |
697 | br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx, |
698 | struct netlink_ext_ack *extack, |
699 | const char *what) |
700 | { |
701 | u32 max = READ_ONCE(pmctx->mdb_max_entries); |
702 | u32 n = READ_ONCE(pmctx->mdb_n_entries); |
703 | |
704 | if (max && n >= max) { |
705 | NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u", |
706 | what, n, max); |
707 | return -E2BIG; |
708 | } |
709 | |
710 | WRITE_ONCE(pmctx->mdb_n_entries, n + 1); |
711 | return 0; |
712 | } |
713 | |
714 | static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx) |
715 | { |
716 | u32 n = READ_ONCE(pmctx->mdb_n_entries); |
717 | |
718 | WARN_ON_ONCE(n == 0); |
719 | WRITE_ONCE(pmctx->mdb_n_entries, n - 1); |
720 | } |
721 | |
722 | static int br_multicast_port_ngroups_inc(struct net_bridge_port *port, |
723 | const struct br_ip *group, |
724 | struct netlink_ext_ack *extack) |
725 | { |
726 | struct net_bridge_mcast_port *pmctx; |
727 | int err; |
728 | |
729 | lockdep_assert_held_once(&port->br->multicast_lock); |
730 | |
731 | /* Always count on the port context. */ |
732 | err = br_multicast_port_ngroups_inc_one(pmctx: &port->multicast_ctx, extack, |
733 | what: "Port"); |
734 | if (err) { |
735 | trace_br_mdb_full(dev: port->dev, group); |
736 | return err; |
737 | } |
738 | |
739 | /* Only count on the VLAN context if VID is given, and if snooping on |
740 | * that VLAN is enabled. |
741 | */ |
742 | if (!group->vid) |
743 | return 0; |
744 | |
745 | pmctx = br_multicast_port_vid_to_port_ctx(port, vid: group->vid); |
746 | if (!pmctx) |
747 | return 0; |
748 | |
749 | err = br_multicast_port_ngroups_inc_one(pmctx, extack, what: "Port-VLAN"); |
750 | if (err) { |
751 | trace_br_mdb_full(dev: port->dev, group); |
752 | goto dec_one_out; |
753 | } |
754 | |
755 | return 0; |
756 | |
757 | dec_one_out: |
758 | br_multicast_port_ngroups_dec_one(pmctx: &port->multicast_ctx); |
759 | return err; |
760 | } |
761 | |
762 | static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid) |
763 | { |
764 | struct net_bridge_mcast_port *pmctx; |
765 | |
766 | lockdep_assert_held_once(&port->br->multicast_lock); |
767 | |
768 | if (vid) { |
769 | pmctx = br_multicast_port_vid_to_port_ctx(port, vid); |
770 | if (pmctx) |
771 | br_multicast_port_ngroups_dec_one(pmctx); |
772 | } |
773 | br_multicast_port_ngroups_dec_one(pmctx: &port->multicast_ctx); |
774 | } |
775 | |
776 | u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx) |
777 | { |
778 | return READ_ONCE(pmctx->mdb_n_entries); |
779 | } |
780 | |
781 | void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max) |
782 | { |
783 | WRITE_ONCE(pmctx->mdb_max_entries, max); |
784 | } |
785 | |
786 | u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx) |
787 | { |
788 | return READ_ONCE(pmctx->mdb_max_entries); |
789 | } |
790 | |
791 | static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc) |
792 | { |
793 | struct net_bridge_port_group *pg; |
794 | |
795 | pg = container_of(gc, struct net_bridge_port_group, mcast_gc); |
796 | WARN_ON(!hlist_unhashed(&pg->mglist)); |
797 | WARN_ON(!hlist_empty(&pg->src_list)); |
798 | |
799 | timer_shutdown_sync(timer: &pg->rexmit_timer); |
800 | timer_shutdown_sync(timer: &pg->timer); |
801 | kfree_rcu(pg, rcu); |
802 | } |
803 | |
804 | void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, |
805 | struct net_bridge_port_group *pg, |
806 | struct net_bridge_port_group __rcu **pp) |
807 | { |
808 | struct net_bridge *br = pg->key.port->br; |
809 | struct net_bridge_group_src *ent; |
810 | struct hlist_node *tmp; |
811 | |
812 | rcu_assign_pointer(*pp, pg->next); |
813 | hlist_del_init(n: &pg->mglist); |
814 | br_multicast_eht_clean_sets(pg); |
815 | hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) |
816 | br_multicast_del_group_src(src: ent, fastleave: false); |
817 | br_mdb_notify(dev: br->dev, mp, pg, RTM_DELMDB); |
818 | if (!br_multicast_is_star_g(ip: &mp->addr)) { |
819 | rhashtable_remove_fast(ht: &br->sg_port_tbl, obj: &pg->rhnode, |
820 | params: br_sg_port_rht_params); |
821 | br_multicast_sg_del_exclude_ports(sgmp: mp); |
822 | } else { |
823 | br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); |
824 | } |
825 | br_multicast_port_ngroups_dec(port: pg->key.port, vid: pg->key.addr.vid); |
826 | hlist_add_head(n: &pg->mcast_gc.gc_node, h: &br->mcast_gc_list); |
827 | queue_work(wq: system_long_wq, work: &br->mcast_gc_work); |
828 | |
829 | if (!mp->ports && !mp->host_joined && netif_running(dev: br->dev)) |
830 | mod_timer(timer: &mp->timer, expires: jiffies); |
831 | } |
832 | |
833 | static void br_multicast_find_del_pg(struct net_bridge *br, |
834 | struct net_bridge_port_group *pg) |
835 | { |
836 | struct net_bridge_port_group __rcu **pp; |
837 | struct net_bridge_mdb_entry *mp; |
838 | struct net_bridge_port_group *p; |
839 | |
840 | mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
841 | if (WARN_ON(!mp)) |
842 | return; |
843 | |
844 | for (pp = &mp->ports; |
845 | (p = mlock_dereference(*pp, br)) != NULL; |
846 | pp = &p->next) { |
847 | if (p != pg) |
848 | continue; |
849 | |
850 | br_multicast_del_pg(mp, pg, pp); |
851 | return; |
852 | } |
853 | |
854 | WARN_ON(1); |
855 | } |
856 | |
857 | static void br_multicast_port_group_expired(struct timer_list *t) |
858 | { |
859 | struct net_bridge_port_group *pg = from_timer(pg, t, timer); |
860 | struct net_bridge_group_src *src_ent; |
861 | struct net_bridge *br = pg->key.port->br; |
862 | struct hlist_node *tmp; |
863 | bool changed; |
864 | |
865 | spin_lock(lock: &br->multicast_lock); |
866 | if (!netif_running(dev: br->dev) || timer_pending(timer: &pg->timer) || |
867 | hlist_unhashed(h: &pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) |
868 | goto out; |
869 | |
870 | changed = !!(pg->filter_mode == MCAST_EXCLUDE); |
871 | pg->filter_mode = MCAST_INCLUDE; |
872 | hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { |
873 | if (!timer_pending(timer: &src_ent->timer)) { |
874 | br_multicast_del_group_src(src: src_ent, fastleave: false); |
875 | changed = true; |
876 | } |
877 | } |
878 | |
879 | if (hlist_empty(h: &pg->src_list)) { |
880 | br_multicast_find_del_pg(br, pg); |
881 | } else if (changed) { |
882 | struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, dst: &pg->key.addr); |
883 | |
884 | if (changed && br_multicast_is_star_g(ip: &pg->key.addr)) |
885 | br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); |
886 | |
887 | if (WARN_ON(!mp)) |
888 | goto out; |
889 | br_mdb_notify(dev: br->dev, mp, pg, RTM_NEWMDB); |
890 | } |
891 | out: |
892 | spin_unlock(lock: &br->multicast_lock); |
893 | } |
894 | |
895 | static void br_multicast_gc(struct hlist_head *head) |
896 | { |
897 | struct net_bridge_mcast_gc *gcent; |
898 | struct hlist_node *tmp; |
899 | |
900 | hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { |
901 | hlist_del_init(n: &gcent->gc_node); |
902 | gcent->destroy(gcent); |
903 | } |
904 | } |
905 | |
/* Tag a locally generated query skb with the VLAN it belongs to, when the
 * query originates from a per-VLAN snooping context and that VLAN egresses
 * tagged.  Global (non-VLAN) contexts leave the skb untouched.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	/* Per-port VLAN context takes precedence over the bridge-wide
	 * per-VLAN context.
	 */
	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		/* Can't build a tag without the bridge's VLAN protocol */
		if (br_vlan_get_proto(dev: brmctx->br->dev, p_proto: &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci: vlan->vid);
	}
}
925 | |
/* Build an IGMP query skb (general or group/group-and-source specific).
 *
 * @ip_dst / @group: destination IP and queried group (0 for general query)
 * @with_srcs: include a source list (IGMPv3 group-and-source query)
 * @over_lmqt: select sources whose timers expire after (true) or before
 *	(false) the last-member-query time
 * @sflag: IGMPv3 "suppress router-side processing" flag value
 * @igmp_type: out - the IGMP message type placed in the packet
 * @need_rexmit: out - set when some included source still has
 *	retransmissions pending
 *
 * Returns NULL if nothing needs to be sent (no matching sources, packet
 * would exceed the MTU) or on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* First pass: count the sources on the requested side
			 * of the LMQT boundary that still have rexmits left,
			 * to size the header.
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + IP header incl. 4-byte Router Alert option + IGMP */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(dev: brmctx->br->dev, length: pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(dst: eth->h_source, src: brmctx->br->dev->dev_addr);
	ip_eth_mc_map(naddr: ip_dst, buf: eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, len: sizeof(*eth));

	skb_set_network_header(skb, offset: skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	/* ihl = 6 -> 24-byte IP header: 20 bytes + Router Alert option */
	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(dev: brmctx->br->dev, dst: 0, scope: RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed IP header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(ip: iph);
	skb_put(skb, len: 24);

	skb_set_transport_header(skb, offset: skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Max response time: LMQI for group queries, QRI otherwise */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* Second pass: emit the same sources the first pass counted,
		 * consuming one retransmission credit per source.
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes ran under the same lock; a mismatch means the
		 * list changed underneath us.
		 */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(buff: csum_start, len: igmp_hdr_size);
	skb_put(skb, len: igmp_hdr_size);
	/* Leave skb->data at the network header, as forwarding expects */
	__skb_pull(skb, len: sizeof(*eth));

out:
	return skb;
}
1071 | |
1072 | #if IS_ENABLED(CONFIG_IPV6) |
/* Build an MLD query skb; IPv6 counterpart of br_ip4_multicast_alloc_query().
 *
 * @over_llqt selects sources relative to the last-listener-query time, and
 * @sflag is the MLDv2 S-flag.  Returns NULL when there is nothing to send,
 * on MTU overflow, on allocation failure, or when no usable IPv6 source
 * address exists on the bridge (which also clears BROPT_HAS_IPV6_ADDR).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* First pass: count matching sources to size the
			 * MLDv2 header (see the IPv4 variant).
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + IPv6 header + 8-byte Hop-by-Hop option + MLD */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(dev: brmctx->br->dev, length: pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(dst: eth->h_source, src: brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, len: sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, offset: skb->len);
	ip6h = ipv6_hdr(skb);

	/* Version 6, zero traffic class/flow label */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* No link-local source address -> remember it and bail out */
	if (ipv6_dev_get_saddr(net: dev_net(dev: brmctx->br->dev), dev: brmctx->br->dev,
			       daddr: &ip6h->daddr, srcprefs: 0, saddr: &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br: brmctx->br, opt: BROPT_HAS_IPV6_ADDR, on: false);
		return NULL;
	}

	br_opt_toggle(br: brmctx->br, opt: BROPT_HAS_IPV6_ADDR, on: true);
	ipv6_eth_mc_map(addr: &ip6h->daddr, buf: eth->h_dest);

	/* Hop-by-Hop header carrying the Router Alert option (MLD) */
	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, len: sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, offset: skb->len);
	interval = ipv6_addr_any(a: group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill in the sources counted above, consuming
		 * one retransmission credit per source.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(saddr: &ip6h->saddr, daddr: &ip6h->daddr, len: mld_hdr_size,
				IPPROTO_ICMPV6,
				sum: csum_partial(buff: csum_start, len: mld_hdr_size, sum: 0));
	skb_put(skb, len: mld_hdr_size);
	/* Leave skb->data at the network header, as forwarding expects */
	__skb_pull(skb, len: sizeof(*eth));

out:
	return skb;
}
1236 | #endif |
1237 | |
/* Protocol-dispatching front end for query allocation.  When @ip_dst is
 * NULL the query goes to the all-hosts/all-nodes address (224.0.0.1 for
 * IPv4, ff02::1 for IPv6).  Returns NULL for unknown protocols or when
 * the per-family helper declines to build a packet.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip_dst: ip4_dst, group: group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 - link-local all-nodes */
			ipv6_addr_set(addr: &ip6_dst, htonl(0xff020000), w2: 0, w3: 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    ip6_dst: &ip6_dst, group: &group->dst.ip6,
						    with_srcs, over_llqt: over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
1277 | |
/* Look up or create the MDB entry for @group.  Called under multicast_lock
 * (hence GFP_ATOMIC).  If the MDB is full (hash_max reached), multicast
 * snooping is disabled bridge-wide and -E2BIG is returned.  Returns the
 * entry or an ERR_PTR(), never NULL.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, dst: group);
	if (mp)
		return mp;

	if (atomic_read(v: &br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(dev: br->dev, group);
		br_mc_disabled_update(dev: br->dev, value: false, NULL);
		br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: false);
		return ERR_PTR(error: -E2BIG);
	}

	mp = kzalloc(size: sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(error: -ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	/* lookup+insert is atomic, so a concurrent inserter loses cleanly */
	err = rhashtable_lookup_insert_fast(ht: &br->mdb_hash_tbl, obj: &mp->rhnode,
					    params: br_mdb_rht_params);
	if (err) {
		kfree(objp: mp);
		mp = ERR_PTR(error: err);
	} else {
		hlist_add_head_rcu(n: &mp->mdb_node, h: &br->mdb_list);
	}

	return mp;
}
1314 | |
/* Timer callback: a source entry's membership timed out.
 *
 * In INCLUDE mode the source is simply removed, and the whole port group
 * goes with it if it was the last source.  In EXCLUDE mode the entry is
 * kept and its forwarding state is re-evaluated instead.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(lock: &br->multicast_lock);
	/* Bail if the entry was already unlinked or the timer was re-armed
	 * while we were waiting for the lock.
	 */
	if (hlist_unhashed(h: &src->node) || !netif_running(dev: br->dev) ||
	    timer_pending(timer: &src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, fastleave: false);
		if (!hlist_empty(h: &pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(lock: &br->multicast_lock);
}
1339 | |
1340 | struct net_bridge_group_src * |
1341 | br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip) |
1342 | { |
1343 | struct net_bridge_group_src *ent; |
1344 | |
1345 | switch (ip->proto) { |
1346 | case htons(ETH_P_IP): |
1347 | hlist_for_each_entry(ent, &pg->src_list, node) |
1348 | if (ip->src.ip4 == ent->addr.src.ip4) |
1349 | return ent; |
1350 | break; |
1351 | #if IS_ENABLED(CONFIG_IPV6) |
1352 | case htons(ETH_P_IPV6): |
1353 | hlist_for_each_entry(ent, &pg->src_list, node) |
1354 | if (!ipv6_addr_cmp(a1: &ent->addr.src.ip6, a2: &ip->src.ip6)) |
1355 | return ent; |
1356 | break; |
1357 | #endif |
1358 | } |
1359 | |
1360 | return NULL; |
1361 | } |
1362 | |
/* Allocate and link a new source entry on @pg for @src_ip.
 *
 * Returns NULL when the per-group source limit is hit, the source address
 * is invalid for its family (zeronet/multicast for IPv4, any/multicast for
 * IPv6), or allocation fails.  Called under multicast_lock (GFP_ATOMIC).
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(addr: src_ip->src.ip4) ||
		    ipv4_is_multicast(addr: src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(a: &src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(addr: &src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(size: sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(n: &grp_src->node, h: &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1401 | |
/* Allocate a new port group entry for @group on @port.
 *
 * Charges the port's per-vlan group accounting first (and rolls it back on
 * any failure), and inserts S,G entries into the bridge's S,G port hash.
 * On success the entry is linked into the port's mglist; the caller is
 * responsible for linking it into the MDB entry's port list.  Returns NULL
 * on failure with @extack set.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(size: sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* Only S,G entries live in the S,G port hash; *,G do not */
	if (!br_multicast_is_star_g(ip: group) &&
	    rhashtable_lookup_insert_fast(ht: &port->br->sg_port_tbl, obj: &p->rhnode,
					  params: br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(n: &p->mglist, h: &port->mglist);

	/* Without an explicit host source, match any source MAC */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(addr: p->eth_addr);

	return p;

free_out:
	kfree(objp: p);
dec_out:
	br_multicast_port_ngroups_dec(port, vid: group->vid);
	return NULL;
}
1460 | |
1461 | void br_multicast_del_port_group(struct net_bridge_port_group *p) |
1462 | { |
1463 | struct net_bridge_port *port = p->key.port; |
1464 | __u16 vid = p->key.addr.vid; |
1465 | |
1466 | hlist_del_init(n: &p->mglist); |
1467 | if (!br_multicast_is_star_g(ip: &p->key.addr)) |
1468 | rhashtable_remove_fast(ht: &port->br->sg_port_tbl, obj: &p->rhnode, |
1469 | params: br_sg_port_rht_params); |
1470 | kfree(objp: p); |
1471 | br_multicast_port_ngroups_dec(port, vid); |
1472 | } |
1473 | |
/* Mark the bridge device itself as a member of @mp's group, optionally
 * sending an RTM_NEWMDB notification on the transition.  L2 groups have
 * no membership timer; others get theirs refreshed.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		/* *,G host state feeds into the corresponding S,G entries */
		if (br_multicast_is_star_g(ip: &mp->addr))
			br_multicast_star_g_host_state(star_mp: mp);
		if (notify)
			br_mdb_notify(dev: mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(group: &mp->addr))
		return;

	mod_timer(timer: &mp->timer, expires: jiffies + brmctx->multicast_membership_interval);
}
1490 | |
/* Undo br_multicast_host_join(): clear the bridge device's own membership
 * of @mp's group, optionally notifying with RTM_DELMDB.  No-op if the host
 * was not joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(ip: &mp->addr))
		br_multicast_star_g_host_state(star_mp: mp);
	if (notify)
		br_mdb_notify(dev: mp->br->dev, mp, NULL, RTM_DELMDB);
}
1502 | |
/* Core join handling; caller holds multicast_lock.
 *
 * A NULL @pmctx means the join is for the bridge device itself (host
 * join).  Otherwise the port's group entry is looked up or created and
 * spliced into the MDB entry's RCU port list, which is kept ordered by
 * port pointer.  For IGMPv2/MLDv1 joins the membership timer is refreshed.
 *
 * Returns the port group, NULL (valid for host joins or filtered
 * contexts), or an ERR_PTR().
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(br: brmctx->br, group);
	if (IS_ERR(ptr: mp))
		return ERR_CAST(ptr: mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, notify: true);
		goto out;
	}

	/* Walk the ordered port list: reuse an existing matching entry, or
	 * stop at the insertion point for this port.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port: pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(port: pmctx->port, group, next: *pp, flags: 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(error: -ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(dev: brmctx->br->dev, mp, pg: p, RTM_NEWMDB);

found:
	/* v2/v1 reports carry no source state; just refresh membership */
	if (igmpv2_mldv1)
		mod_timer(timer: &p->timer,
			  expires: now + brmctx->multicast_membership_interval);

out:
	return p;
}
1557 | |
/* Locked wrapper around __br_multicast_add_group() returning an errno.
 * A NULL port group result is success (host-joined groups).
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(lock: &brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, blocked: false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(ptr: pg);
	spin_unlock(lock: &brmctx->br->multicast_lock);

	return err;
}
1577 | |
1578 | static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx, |
1579 | struct net_bridge_mcast_port *pmctx, |
1580 | __be32 group, |
1581 | __u16 vid, |
1582 | const unsigned char *src, |
1583 | bool igmpv2) |
1584 | { |
1585 | struct br_ip br_group; |
1586 | u8 filter_mode; |
1587 | |
1588 | if (ipv4_is_local_multicast(addr: group)) |
1589 | return 0; |
1590 | |
1591 | memset(&br_group, 0, sizeof(br_group)); |
1592 | br_group.dst.ip4 = group; |
1593 | br_group.proto = htons(ETH_P_IP); |
1594 | br_group.vid = vid; |
1595 | filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE; |
1596 | |
1597 | return br_multicast_add_group(brmctx, pmctx, group: &br_group, src, |
1598 | filter_mode, igmpv2_mldv1: igmpv2); |
1599 | } |
1600 | |
1601 | #if IS_ENABLED(CONFIG_IPV6) |
1602 | static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx, |
1603 | struct net_bridge_mcast_port *pmctx, |
1604 | const struct in6_addr *group, |
1605 | __u16 vid, |
1606 | const unsigned char *src, |
1607 | bool mldv1) |
1608 | { |
1609 | struct br_ip br_group; |
1610 | u8 filter_mode; |
1611 | |
1612 | if (ipv6_addr_is_ll_all_nodes(addr: group)) |
1613 | return 0; |
1614 | |
1615 | memset(&br_group, 0, sizeof(br_group)); |
1616 | br_group.dst.ip6 = *group; |
1617 | br_group.proto = htons(ETH_P_IPV6); |
1618 | br_group.vid = vid; |
1619 | filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE; |
1620 | |
1621 | return br_multicast_add_group(brmctx, pmctx, group: &br_group, src, |
1622 | filter_mode, igmpv2_mldv1: mldv1); |
1623 | } |
1624 | #endif |
1625 | |
1626 | static bool br_multicast_rport_del(struct hlist_node *rlist) |
1627 | { |
1628 | if (hlist_unhashed(h: rlist)) |
1629 | return false; |
1630 | |
1631 | hlist_del_init_rcu(n: rlist); |
1632 | return true; |
1633 | } |
1634 | |
1635 | static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx) |
1636 | { |
1637 | return br_multicast_rport_del(rlist: &pmctx->ip4_rlist); |
1638 | } |
1639 | |
1640 | static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx) |
1641 | { |
1642 | #if IS_ENABLED(CONFIG_IPV6) |
1643 | return br_multicast_rport_del(rlist: &pmctx->ip6_rlist); |
1644 | #else |
1645 | return false; |
1646 | #endif |
1647 | } |
1648 | |
/* Common handling when a port's learned-router timer fires: drop the port
 * from the router list unless its router mode is statically configured
 * (disabled/permanent) or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(lock: &br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(timer: t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, deleted: del);
out:
	spin_unlock(lock: &br->multicast_lock);
}
1667 | |
1668 | static void br_ip4_multicast_router_expired(struct timer_list *t) |
1669 | { |
1670 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1671 | ip4_mc_router_timer); |
1672 | |
1673 | br_multicast_router_expired(pmctx, t, rlist: &pmctx->ip4_rlist); |
1674 | } |
1675 | |
1676 | #if IS_ENABLED(CONFIG_IPV6) |
1677 | static void br_ip6_multicast_router_expired(struct timer_list *t) |
1678 | { |
1679 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1680 | ip6_mc_router_timer); |
1681 | |
1682 | br_multicast_router_expired(pmctx, t, rlist: &pmctx->ip6_rlist); |
1683 | } |
1684 | #endif |
1685 | |
/* Push the bridge's own multicast-router state down to offloading
 * switchdev drivers (deferred attribute set).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(dev: p->dev, attr: &attr, NULL);
}
1698 | |
/* The bridge's own learned-router timer for one family fired.  Only clear
 * the offloaded router state when neither family still considers the
 * bridge a router and the mode isn't statically configured.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(lock: &brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(p: brmctx->br, is_mc_router: false);
out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
1713 | |
1714 | static void br_ip4_multicast_local_router_expired(struct timer_list *t) |
1715 | { |
1716 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1717 | ip4_mc_router_timer); |
1718 | |
1719 | br_multicast_local_router_expired(brmctx, timer: t); |
1720 | } |
1721 | |
1722 | #if IS_ENABLED(CONFIG_IPV6) |
1723 | static void br_ip6_multicast_local_router_expired(struct timer_list *t) |
1724 | { |
1725 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1726 | ip6_mc_router_timer); |
1727 | |
1728 | br_multicast_local_router_expired(brmctx, timer: t); |
1729 | } |
1730 | #endif |
1731 | |
/* The other-querier-present timer ran out: no foreign querier was heard,
 * so take over querying ourselves (unless the bridge is down, the VLAN
 * context is globally disabled, or snooping is off).
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!netif_running(dev: brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
1746 | |
1747 | static void br_ip4_multicast_querier_expired(struct timer_list *t) |
1748 | { |
1749 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1750 | ip4_other_query.timer); |
1751 | |
1752 | br_multicast_querier_expired(brmctx, query: &brmctx->ip4_own_query); |
1753 | } |
1754 | |
1755 | #if IS_ENABLED(CONFIG_IPV6) |
1756 | static void br_ip6_multicast_querier_expired(struct timer_list *t) |
1757 | { |
1758 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
1759 | ip6_other_query.timer); |
1760 | |
1761 | br_multicast_querier_expired(brmctx, query: &brmctx->ip6_own_query); |
1762 | } |
1763 | #endif |
1764 | |
/* Intentionally empty timer callback: presumably only the pending state
 * of the query-delay timer matters to the querier logic, so expiry itself
 * requires no action — NOTE(review): confirm against the timer's users.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}
1768 | |
/* Record the source address of the query we are about to loop back
 * (@skb) as this context's own querier address for @ip's family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	/* without IPv6 the else branch is compiled out entirely */
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1780 | |
/* Build and emit one query.
 *
 * With a port context the skb is transmitted out that port through the
 * netfilter LOCAL_OUT hook; when sources were included with the suppress
 * flag set, a second query for the under-LMQT sources is sent as well.
 * Without a port context the query is looped back into the bridge via
 * netif_rx() and our own querier address is recorded.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* start with the over-LMQT sources when suppression is requested */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, igmp_type: &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(br: brmctx->br, p: pmctx->port, skb, type: igmp_type,
				   dir: BR_MCAST_DIR_TX);
		NF_HOOK(pf: NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			net: dev_net(dev: pmctx->port->dev), NULL, skb, NULL, out: skb->dev,
			okfn: br_dev_queue_push_xmit);

		/* second round: query the under-LMQT sources too */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, ip: group, skb);
		br_multicast_count(br: brmctx->br, NULL, skb, type: igmp_type,
				   dir: BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1824 | |
/* Take a torn-read-safe snapshot of @querier into @dest using the
 * seqcount paired with br_multicast_update_querier().
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1837 | |
/* Publish a new querier port/address under the seqcount write section so
 * br_multicast_read_querier() never observes a torn update.
 * (@brmctx is not used here.)
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1848 | |
/* Send a periodic general query for the family selected by @own_query,
 * provided we act as querier and no other querier is currently active
 * (its presence timer pending).  When the bridge itself is the sender,
 * a previously selected port querier is reset first.  Re-arms the own
 * query timer with the startup or regular interval.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Decide the family by which own_query struct was handed in */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(timer: &other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, ifindex: 0, saddr: &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, group: &br_group, with_srcs: false,
				  sflag: 0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(timer: &own_query->timer, expires: time);
}
1897 | |
/* Common handler for a per-port own-query timer expiry (both families).
 * Under the multicast lock: skip stopped ports, account one more startup
 * query if still in the startup phase, then (re)send a general query.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(lock: &br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, own_query: query);

out:
	spin_unlock(lock: &br->multicast_lock);
}
1918 | |
1919 | static void br_ip4_multicast_port_query_expired(struct timer_list *t) |
1920 | { |
1921 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1922 | ip4_own_query.timer); |
1923 | |
1924 | br_multicast_port_query_expired(pmctx, query: &pmctx->ip4_own_query); |
1925 | } |
1926 | |
1927 | #if IS_ENABLED(CONFIG_IPV6) |
1928 | static void br_ip6_multicast_port_query_expired(struct timer_list *t) |
1929 | { |
1930 | struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, |
1931 | ip6_own_query.timer); |
1932 | |
1933 | br_multicast_port_query_expired(pmctx, query: &pmctx->ip6_own_query); |
1934 | } |
1935 | #endif |
1936 | |
/* Timer callback: retransmit pending group and group-and-source specific
 * queries for a port group. Only acts while we are the querier and no
 * foreign querier is present; re-arms itself while either the group
 * query retransmit counter or a source retransmit counter is non-zero.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(lock: &br->multicast_lock);
	if (!netif_running(dev: br->dev) || hlist_unhashed(h: &pg->mglist) ||
	    !br_opt_get(br, opt: BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* a foreign querier is responsible for queries while its timer runs */
	if (!other_query || timer_pending(timer: &other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group-specific query (no source list) */
		__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
					  group: &pg->key.addr, with_srcs: false, sflag: 1, NULL);
	}
	/* group-and-source-specific query; sets need_rexmit when any source
	 * still has retransmissions outstanding
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
				  group: &pg->key.addr, with_srcs: true, sflag: 0, need_rexmit: &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(timer: &pg->rexmit_timer, expires: jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(lock: &br->multicast_lock);
}
1982 | |
1983 | static int br_mc_disabled_update(struct net_device *dev, bool value, |
1984 | struct netlink_ext_ack *extack) |
1985 | { |
1986 | struct switchdev_attr attr = { |
1987 | .orig_dev = dev, |
1988 | .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, |
1989 | .flags = SWITCHDEV_F_DEFER, |
1990 | .u.mc_disabled = !value, |
1991 | }; |
1992 | |
1993 | return switchdev_port_attr_set(dev, attr: &attr, extack); |
1994 | } |
1995 | |
/* Initialize a per-port (or per-port-vlan when @vlan != NULL) multicast
 * context: default router mode and the router/own-query timers for both
 * address families.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
2014 | |
/* Tear down a per-port multicast context: synchronously stop the router
 * presence timers so no callback can still be running afterwards.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(timer: &pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(timer: &pmctx->ip4_mc_router_timer);
}
2022 | |
/* Set up multicast state when a port joins the bridge: default EHT host
 * limit, the port's multicast context, hardware notification of the
 * current snooping state, and per-cpu statistics.
 * Returns 0 on success or a negative errno (-EOPNOTSUPP from the
 * switchdev update is deliberately ignored: no offloading hardware).
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, pmctx: &port->multicast_ctx);

	err = br_mc_disabled_update(dev: port->dev,
				    value: br_opt_get(br: port->br,
						   opt: BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
2043 | |
/* Undo br_multicast_add_port() when a port leaves the bridge: delete all
 * of the port's remaining groups under the lock, then run the collected
 * GC entries outside it, deinit the context and free the stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(lock: &br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	/* grab the pending GC entries so they can be freed unlocked */
	hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head);
	spin_unlock_bh(lock: &br->multicast_lock);
	br_multicast_gc(head: &deleted_head);
	br_multicast_port_ctx_deinit(pmctx: &port->multicast_ctx);
	free_percpu(pdata: port->mcast_stats);
}
2061 | |
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately. The try_to_del/del pair ensures we only
 * re-arm when the timer was idle or successfully cancelled, avoiding a
 * race with a concurrently-running expiry.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(timer: &query->timer) >= 0 ||
	    del_timer(timer: &query->timer))
		mod_timer(timer: &query->timer, expires: jiffies);
}
2070 | |
/* Enable multicast processing for a port (or port-vlan) context: kick
 * the own-query timers, re-add permanent router ports, and for vlan
 * contexts refresh the mdb entry counter. Caller holds the multicast
 * lock.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) ||
	    !netif_running(dev: br->dev))
		return;

	br_multicast_enable(query: &pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(query: &pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */

		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}
2109 | |
2110 | void br_multicast_enable_port(struct net_bridge_port *port) |
2111 | { |
2112 | struct net_bridge *br = port->br; |
2113 | |
2114 | spin_lock_bh(lock: &br->multicast_lock); |
2115 | __br_multicast_enable_port_ctx(pmctx: &port->multicast_ctx); |
2116 | spin_unlock_bh(lock: &br->multicast_lock); |
2117 | } |
2118 | |
/* Disable multicast processing for a port (or port-vlan) context: drop
 * all non-permanent groups belonging to the context, remove it from the
 * router port lists, stop its timers and notify about any router port
 * removal. Caller holds the multicast lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(br: pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(timer: &pmctx->ip4_mc_router_timer);
	del_timer(timer: &pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(timer: &pmctx->ip6_mc_router_timer);
	del_timer(timer: &pmctx->ip6_own_query.timer);
#endif
	/* one notification for both family removals */
	br_multicast_rport_del_notify(pmctx, deleted: del);
}
2141 | |
2142 | void br_multicast_disable_port(struct net_bridge_port *port) |
2143 | { |
2144 | spin_lock_bh(lock: &port->br->multicast_lock); |
2145 | __br_multicast_disable_port_ctx(pmctx: &port->multicast_ctx); |
2146 | spin_unlock_bh(lock: &port->br->multicast_lock); |
2147 | } |
2148 | |
2149 | static int __grp_src_delete_marked(struct net_bridge_port_group *pg) |
2150 | { |
2151 | struct net_bridge_group_src *ent; |
2152 | struct hlist_node *tmp; |
2153 | int deleted = 0; |
2154 | |
2155 | hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) |
2156 | if (ent->flags & BR_SGRP_F_DELETE) { |
2157 | br_multicast_del_group_src(src: ent, fastleave: false); |
2158 | deleted++; |
2159 | } |
2160 | |
2161 | return deleted; |
2162 | } |
2163 | |
/* Update a source entry's timer and immediately re-evaluate its
 * forwarding state, which depends on whether the timer is running.
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(timer: &src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
2170 | |
/* Send group-and-source specific queries for all sources marked
 * BR_SGRP_F_SEND, lowering their timers to LMQT (last member query
 * time), and arm the retransmit timer. Only queries when we are the
 * active querier (no foreign querier timer pending).
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(dev: brmctx->br->dev) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* only shorten timers that outlive the LMQT */
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(timer: &other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(src: ent, expires: lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(timer: &other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
				  group: &pg->key.addr, with_srcs: true, sflag: 1, NULL);

	/* schedule the next retransmission no later than LMI */
	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(timer: &pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(timer: &pg->rexmit_timer, expires: lmi);
}
2217 | |
/* Send a group-specific query for @pg (when we are the active querier),
 * set up the group query retransmit counter/timer, and in EXCLUDE mode
 * lower the group timer to LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(dev: brmctx->br->dev) ||
	    !br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(timer: &other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* one query is sent now, the rest via the rexmit timer */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, ip_dst: &pg->key.addr,
					  group: &pg->key.addr, with_srcs: false, sflag: 0, NULL);
		if (!timer_pending(timer: &pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(timer: &pg->rexmit_timer, expires: lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(timer: &pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(timer: &pg->timer, expires: now + br_multicast_lmqt(brmctx));
}
2252 | |
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
/* Handle IS_IN/ALLOW records (RFC 3376 / RFC 3810 host-state rules
 * above): add any new sources from the record, refresh each reported
 * source's timer to GMI (group membership interval) and process EHT
 * per-host tracking. Returns true if the group state changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2290 | |
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
/* IS_EX received while in INCLUDE mode: keep only sources present in
 * the record (mark-and-sweep via BR_SGRP_F_DELETE), create missing ones
 * with a zero (blocked) timer, then drop everything still marked.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* tentatively mark all existing sources for deletion */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(src: ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2326 | |
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
/* IS_EX received while in EXCLUDE mode: keep the intersection with the
 * record (mark-and-sweep), create genuinely new sources with a GMI
 * timer, then delete what is no longer reported.
 * Returns true if the group state changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* tentatively mark all existing sources for deletion */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				__grp_src_mod_timer(src: ent,
						    expires: now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2373 | |
/* Handle an IS_EX record: dispatch on the current filter mode, switch
 * the group to EXCLUDE mode and refresh the group timer to GMI.
 * Returns true if the group state changed.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode flip always counts as a change */
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(timer: &pg->timer, expires: jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2399 | |
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
/* TO_IN received while in INCLUDE mode: merge the record's sources in
 * (timer=GMI) and query the sources not present in the record (those
 * still marked BR_SGRP_F_SEND). Returns true if state changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* tentatively mark all existing sources to be queried */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2445 | |
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
/* TO_IN received while in EXCLUDE mode: refresh/create the record's
 * sources (timer=GMI), query the active (timer-running) sources missing
 * from the record and additionally send a group-specific query.
 * Returns true if state changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only active sources (X set) are candidates for queries */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(timer: &ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			if (timer_pending(timer: &ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(src: ent, expires: now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2497 | |
2498 | static bool br_multicast_toin(struct net_bridge_mcast *brmctx, |
2499 | struct net_bridge_mcast_port *pmctx, |
2500 | struct net_bridge_port_group *pg, void *h_addr, |
2501 | void *srcs, u32 nsrcs, size_t addr_size, |
2502 | int grec_type) |
2503 | { |
2504 | bool changed = false; |
2505 | |
2506 | switch (pg->filter_mode) { |
2507 | case MCAST_INCLUDE: |
2508 | changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs, |
2509 | nsrcs, addr_size, grec_type); |
2510 | break; |
2511 | case MCAST_EXCLUDE: |
2512 | changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs, |
2513 | nsrcs, addr_size, grec_type); |
2514 | break; |
2515 | } |
2516 | |
2517 | if (br_multicast_eht_should_del_pg(pg)) { |
2518 | pg->flags |= MDB_PG_FLAGS_FAST_LEAVE; |
2519 | br_multicast_find_del_pg(br: pg->key.port->br, pg); |
2520 | /* a notification has already been sent and we shouldn't |
2521 | * access pg after the delete so we have to return false |
2522 | */ |
2523 | changed = false; |
2524 | } |
2525 | |
2526 | return changed; |
2527 | } |
2528 | |
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
/* TO_EX received while in INCLUDE mode: keep and query the intersection
 * with the record, create missing sources blocked (no timer), and delete
 * sources absent from the record.
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* default every existing source to "delete, don't query" */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			/* in the intersection: keep it and query it */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(src: ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2571 | |
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
/* TO_EX received while in EXCLUDE mode: keep sources present in the
 * record (new ones inherit the group timer), delete the rest and query
 * the active (timer-running) kept sources. Returns true if state
 * changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* default every existing source to "delete, don't query" */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(src: ent, expires: pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(timer: &ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2624 | |
/* Handle a TO_EX record: dispatch on the current filter mode, switch
 * the group to EXCLUDE mode and refresh the group timer to GMI.
 * Returns true if the group state changed.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode flip always counts as a change */
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(timer: &pg->timer, expires: jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2651 | |
/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
/* BLOCK received while in INCLUDE mode: no state change to the source
 * set; just query the sources that are both in our list and the record.
 * Returns true if (EHT) state changed.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2688 | |
/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
/* BLOCK received while in EXCLUDE mode: add previously-unknown sources
 * with the group timer, then query every reported source whose timer is
 * running. Returns true if state changed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, ip: &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, src_ip: &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(src: ent, expires: pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(timer: &ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2733 | |
/* Handle a BLOCK record: dispatch on the current filter mode, then
 * delete the whole port group when it became empty in INCLUDE mode or
 * when per-host (EHT) tracking says no hosts remain. Returns true if
 * the group state changed (always false after a delete, since @pg must
 * not be touched again).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(h: &pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(br: pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2765 | |
2766 | static struct net_bridge_port_group * |
2767 | br_multicast_find_port(struct net_bridge_mdb_entry *mp, |
2768 | struct net_bridge_port *p, |
2769 | const unsigned char *src) |
2770 | { |
2771 | struct net_bridge *br __maybe_unused = mp->br; |
2772 | struct net_bridge_port_group *pg; |
2773 | |
2774 | for (pg = mlock_dereference(mp->ports, br); |
2775 | pg; |
2776 | pg = mlock_dereference(pg->next, br)) |
2777 | if (br_port_group_equal(p: pg, port: p, src)) |
2778 | return pg; |
2779 | |
2780 | return NULL; |
2781 | } |
2782 | |
/* Process an IGMPv3 membership report (RFC 3376): validate and walk each
 * group record, first running the v2-compatible join/leave path, then --
 * only when a port context exists and the bridge is configured for
 * IGMPv3 -- applying the full per-source state machine under
 * br->multicast_lock.
 * Returns 0 on success or -EINVAL if the packet is truncated/malformed.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* make sure the fixed part of this record is present */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... and that the full source list is present too */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* skip record types we don't understand */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* TO_IN/IS_IN with an empty source list is effectively a
		 * v2-style leave; only take the fast path when no v3
		 * per-source processing will run below.
		 */
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		/* full IGMPv3 source-list handling needs the multicast lock */
		spin_lock(lock: &brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(br: brmctx->br, dst: group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mp: mdst, p: pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src,
							   nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src,
							   nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     srcs: grec->grec_src,
						     nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src,
						    nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src,
						    nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     srcs: grec->grec_src,
						     nsrcs, addr_size: sizeof(__be32), grec_type: type);
			break;
		}
		if (changed)
			br_mdb_notify(dev: brmctx->br->dev, mp: mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(lock: &brmctx->br->multicast_lock);
	}

	return err;
}
2903 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 report (RFC 3810): the IPv6 counterpart of
 * br_ip4_multicast_igmp3_report().  Each record runs the MLDv1-compatible
 * join/leave path first; full per-source handling then runs under
 * br->multicast_lock when a port context exists and MLDv2 is configured.
 * Returns 0 on success or -EINVAL if the packet is truncated/malformed.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, len: sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read the record's source count before trusting its length */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, offset: nsrcs_offset,
					    len: sizeof(__nsrcs), buffer: &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		/* make sure the whole record, sources included, is present */
		if (!ipv6_mc_may_pull(skb, len: len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* skip record types we don't understand */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* TO_IN/IS_IN with no sources is effectively an MLDv1 done */
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     group: &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 group: &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		/* full MLDv2 source-list handling needs the multicast lock */
		spin_lock(lock: &brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(br: brmctx->br, dst: &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mp: mdst, p: pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src, nsrcs,
							   addr_size: sizeof(struct in6_addr),
							   grec_type: grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   srcs: grec->grec_src, nsrcs,
							   addr_size: sizeof(struct in6_addr),
							   grec_type: grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     srcs: grec->grec_src, nsrcs,
						     addr_size: sizeof(struct in6_addr),
						     grec_type: grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src, nsrcs,
						    addr_size: sizeof(struct in6_addr),
						    grec_type: grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    srcs: grec->grec_src, nsrcs,
						    addr_size: sizeof(struct in6_addr),
						    grec_type: grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     srcs: grec->grec_src, nsrcs,
						     addr_size: sizeof(struct in6_addr),
						     grec_type: grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(dev: brmctx->br->dev, mp: mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(lock: &brmctx->br->multicast_lock);
	}

	return err;
}
#endif
3046 | |
/* Querier election: decide whether the query source @saddr should become
 * (or stay) the selected querier for its protocol family.  A numerically
 * lower (or equal) source address wins against the current one; the
 * address is also accepted when no querier is known (zero IPv4 address)
 * or when neither our own-query nor the other-querier timer is running.
 * On acceptance the querier record is updated with @saddr and the port
 * the query arrived on (0 = the bridge itself).
 * Returns true if the querier state was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(a1: &saddr->src.ip6, a2: &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* a higher address still wins if no election timers are running */
	if (!timer_pending(timer: own_timer) && !timer_pending(timer: other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, ifindex: port_ifidx, saddr);

	return true;
}
3087 | |
3088 | static struct net_bridge_port * |
3089 | __br_multicast_get_querier_port(struct net_bridge *br, |
3090 | const struct bridge_mcast_querier *querier) |
3091 | { |
3092 | int port_ifidx = READ_ONCE(querier->port_ifidx); |
3093 | struct net_bridge_port *p; |
3094 | struct net_device *dev; |
3095 | |
3096 | if (port_ifidx == 0) |
3097 | return NULL; |
3098 | |
3099 | dev = dev_get_by_index_rcu(net: dev_net(dev: br->dev), ifindex: port_ifidx); |
3100 | if (!dev) |
3101 | return NULL; |
3102 | p = br_port_get_rtnl_rcu(dev); |
3103 | if (!p || p->br != br) |
3104 | return NULL; |
3105 | |
3106 | return p; |
3107 | } |
3108 | |
3109 | size_t br_multicast_querier_state_size(void) |
3110 | { |
3111 | return nla_total_size(payload: 0) + /* nest attribute */ |
3112 | nla_total_size(payload: sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */ |
3113 | nla_total_size(payload: sizeof(int)) + /* BRIDGE_QUERIER_IP_PORT */ |
3114 | nla_total_size_64bit(payload: sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */ |
3115 | #if IS_ENABLED(CONFIG_IPV6) |
3116 | nla_total_size(payload: sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */ |
3117 | nla_total_size(payload: sizeof(int)) + /* BRIDGE_QUERIER_IPV6_PORT */ |
3118 | nla_total_size_64bit(payload: sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */ |
3119 | #endif |
3120 | 0; |
3121 | } |
3122 | |
/* protected by rtnl or rcu */
/* Fill a netlink nest @nest_attr with the current IPv4 (and, when
 * enabled, IPv6) querier state of @brmctx: querier address, the port it
 * was seen on, and the remaining other-querier timer.  Nothing is dumped
 * when multicast snooping is disabled for this context; an empty nest is
 * cancelled before returning.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(br: brmctx->br, opt: BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, attrtype: nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	/* skip IPv4 when we are not querier and no foreign querier is known */
	if (!brmctx->multicast_querier &&
	    !timer_pending(timer: &brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(querier: &brmctx->ip4_querier, dest: &querier);
	if (nla_put_in_addr(skb, attrtype: BRIDGE_QUERIER_IP_ADDRESS,
			    addr: querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(br: brmctx->br, querier: &querier);
	if (timer_pending(timer: &brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, attrtype: BRIDGE_QUERIER_IP_OTHER_TIMER,
			       value: br_timer_value(timer: &brmctx->ip4_other_query.timer),
			       padattr: BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, attrtype: BRIDGE_QUERIER_IP_PORT, value: p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(timer: &brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(querier: &brmctx->ip6_querier, dest: &querier);
	if (nla_put_in6_addr(skb, attrtype: BRIDGE_QUERIER_IPV6_ADDRESS,
			     addr: &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(br: brmctx->br, querier: &querier);
	if (timer_pending(timer: &brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, attrtype: BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       value: br_timer_value(timer: &brmctx->ip6_other_query.timer),
			       padattr: BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, attrtype: BRIDGE_QUERIER_IPV6_PORT,
			       value: p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, start: nest);
	/* don't emit an empty nest */
	if (!nla_len(nla: nest))
		nla_nest_cancel(skb, start: nest);

	return 0;

out_err:
	nla_nest_cancel(skb, start: nest);
	return -EMSGSIZE;
}
3198 | |
/* A query from another querier was seen: if the other-querier timer was
 * not already running, start the delay timer for @max_delay (the query's
 * max response time), then (re)arm the other-querier timer for a full
 * querier interval.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(timer: &query->timer))
		mod_timer(timer: &query->delay_timer, expires: jiffies + max_delay);

	mod_timer(timer: &query->timer, expires: jiffies + brmctx->multicast_querier_interval);
}
3209 | |
3210 | static void br_port_mc_router_state_change(struct net_bridge_port *p, |
3211 | bool is_mc_router) |
3212 | { |
3213 | struct switchdev_attr attr = { |
3214 | .orig_dev = p->dev, |
3215 | .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, |
3216 | .flags = SWITCHDEV_F_DEFER, |
3217 | .u.mrouter = is_mc_router, |
3218 | }; |
3219 | |
3220 | switchdev_port_attr_set(dev: p->dev, attr: &attr, NULL); |
3221 | } |
3222 | |
/* Map a router-list hlist node back to its bridge port.  The node's
 * containing member depends on which family's list it sits on: ip6_rlist
 * for the IPv6 router list (when IPv6 is enabled), ip4_rlist otherwise.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
3241 | |
3242 | static struct hlist_node * |
3243 | br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx, |
3244 | struct net_bridge_port *port, |
3245 | struct hlist_head *mc_router_list) |
3246 | |
3247 | { |
3248 | struct hlist_node *slot = NULL; |
3249 | struct net_bridge_port *p; |
3250 | struct hlist_node *rlist; |
3251 | |
3252 | hlist_for_each(rlist, mc_router_list) { |
3253 | p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist); |
3254 | |
3255 | if ((unsigned long)port >= (unsigned long)p) |
3256 | break; |
3257 | |
3258 | slot = rlist; |
3259 | } |
3260 | |
3261 | return slot; |
3262 | } |
3263 | |
/* Return true if @pmctx is NOT on the router list of the *other* protocol
 * family than the one @rnode is being added for.  Without IPv6 there is
 * only one family, so this is trivially true.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(h: &pmctx->ip6_rlist);
	else
		return hlist_unhashed(h: &pmctx->ip4_rlist);
#else
	return true;
#endif
}
3276 | |
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 *
 * @rlist is the port's hlist node for the family-specific router list
 * @mc_router_list.  A no-op if the port is already on the list.
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already linked into a router list */
	if (!hlist_unhashed(h: rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, port: pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(n: rlist, prev: slot);
	else
		hlist_add_head_rcu(n: rlist, h: mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rnode: rlist)) {
		br_rtr_notify(dev: pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(p: pmctx->port, is_mc_router: true);
	}
}
3307 | |
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 *
 * IPv4 convenience wrapper around br_multicast_add_router().
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, rlist: &pmctx->ip4_rlist,
				mc_router_list: &brmctx->ip4_mc_router_list);
}
3318 | |
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 *
 * IPv6 convenience wrapper around br_multicast_add_router(); a no-op
 * when IPv6 is not built in.
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, rlist: &pmctx->ip6_rlist,
				mc_router_list: &brmctx->ip6_mc_router_list);
#endif
}
3331 | |
/* Mark a multicast router as seen: without a port context (@pmctx NULL)
 * the bridge itself is the router and only the bridge-level timer/state
 * is refreshed (when in temp-query mode); with a port context the port
 * is added to @mc_router_list and its timer refreshed, unless the port's
 * router mode is disabled or permanent (perm needs no timer).
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* notify only on the no-router -> router transition */
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(p: brmctx->br, is_mc_router: true);
			mod_timer(timer, expires: now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, expires: now + brmctx->multicast_querier_interval);
}
3360 | |
3361 | static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx, |
3362 | struct net_bridge_mcast_port *pmctx) |
3363 | { |
3364 | struct timer_list *timer = &brmctx->ip4_mc_router_timer; |
3365 | struct hlist_node *rlist = NULL; |
3366 | |
3367 | if (pmctx) { |
3368 | timer = &pmctx->ip4_mc_router_timer; |
3369 | rlist = &pmctx->ip4_rlist; |
3370 | } |
3371 | |
3372 | br_multicast_mark_router(brmctx, pmctx, timer, rlist, |
3373 | mc_router_list: &brmctx->ip4_mc_router_list); |
3374 | } |
3375 | |
/* IPv6 wrapper for br_multicast_mark_router(); a no-op when IPv6 is not
 * built in.  Uses the per-port timer/list node when a port context is
 * given, otherwise the bridge-wide router timer.
 */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}
3392 | |
/* An IGMP general query was received from @saddr: if the sender wins the
 * querier election, refresh the other-querier timers and mark the
 * receiving port (or the bridge) as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip4_multicast_mark_router(brmctx, pmctx);
	}
}
3406 | |
#if IS_ENABLED(CONFIG_IPV6)
/* An MLD general query was received from @saddr: if the sender wins the
 * querier election, refresh the other-querier timers and mark the
 * receiving port (or the bridge) as a multicast router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif
3422 | |
/* Handle a received IGMP query (v1/v2/v3).  A general query (group 0)
 * feeds querier election; a group-specific query lowers the membership
 * timers of the matching mdb entry and its port groups to "last member"
 * time so stale state ages out quickly.  All under br->multicast_lock.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		/* IGMPv1 (code 0): treat as a general query */
		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: run querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						query: &brmctx->ip4_other_query,
						saddr: &saddr, max_delay);
		goto out;
	}

	/* group-specific query: shorten the group's membership timers */
	mp = br_mdb_ip4_get(br: brmctx->br, dst: group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(timer: &mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(timer: &mp->timer) >= 0))
		mod_timer(timer: &mp->timer, expires: now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(timer: &p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(timer: &p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(timer: &p->timer, expires: now + max_delay);
	}

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3502 | |
#if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query (v1/v2): the IPv6 counterpart of
 * br_ip4_multicast_query().  General queries feed querier election;
 * group-specific queries lower the matching entry's membership timers to
 * "last member" time.  All under br->multicast_lock.
 * Returns 0 on success or -EINVAL on a truncated packet.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 sized query */
		if (!pskb_may_pull(skb, len: offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, len: offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(a: &mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(a: group);

	if (is_general_query) {
		/* general query: run querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						query: &brmctx->ip6_other_query,
						saddr: &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* group-specific query: shorten the group's membership timers */
	mp = br_mdb_ip6_get(br: brmctx->br, dst: group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(timer: &mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(timer: &mp->timer) >= 0))
		mod_timer(timer: &mp->timer, expires: now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(timer: &p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(timer: &p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(timer: &p->timer, expires: now + max_delay);
	}

out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
	return err;
}
#endif
3593 | |
/* Handle a leave/done for @group.  With fast-leave enabled on the port
 * the matching non-permanent port group is deleted immediately.
 * Otherwise, if no other querier is active and we are the querier, a
 * group-specific query is sent and the matching port group's timer is
 * lowered; finally the membership timers (host timer when no port
 * context, the port's group timer otherwise) are reduced to "last
 * member" time so the entry expires quickly if no report follows.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(lock: &brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(br: brmctx->br, dst: group);
	if (!mp)
		goto out;

	/* fast leave: delete the port group right away */
	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port: pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, pg: p, pp);
		}
		goto out;
	}

	/* another querier is active; it will handle the query cycle */
	if (timer_pending(timer: &other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* we are the querier: send a group-specific query */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, group: &mp->addr,
					  with_srcs: false, sflag: 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(timer: &own_query->timer, expires: time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, port: pmctx->port, src))
				continue;

			if (!hlist_unhashed(h: &p->mglist) &&
			    (timer_pending(timer: &p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(timer: &p->timer) >= 0)) {
				mod_timer(timer: &p->timer, expires: time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: lower the host membership timer */
		if (mp->host_joined &&
		    (timer_pending(timer: &mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(timer: &mp->timer) >= 0)) {
			mod_timer(timer: &mp->timer, expires: time);
		}

		goto out;
	}

	/* port leave: lower the matching port group's timer */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(h: &p->mglist) &&
		    (timer_pending(timer: &p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(timer: &p->timer) >= 0)) {
			mod_timer(timer: &p->timer, expires: time);
		}

		break;
	}
out:
	spin_unlock(lock: &brmctx->br->multicast_lock);
}
3695 | |
3696 | static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx, |
3697 | struct net_bridge_mcast_port *pmctx, |
3698 | __be32 group, |
3699 | __u16 vid, |
3700 | const unsigned char *src) |
3701 | { |
3702 | struct br_ip br_group; |
3703 | struct bridge_mcast_own_query *own_query; |
3704 | |
3705 | if (ipv4_is_local_multicast(addr: group)) |
3706 | return; |
3707 | |
3708 | own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query; |
3709 | |
3710 | memset(&br_group, 0, sizeof(br_group)); |
3711 | br_group.dst.ip4 = group; |
3712 | br_group.proto = htons(ETH_P_IP); |
3713 | br_group.vid = vid; |
3714 | |
3715 | br_multicast_leave_group(brmctx, pmctx, group: &br_group, |
3716 | other_query: &brmctx->ip4_other_query, |
3717 | own_query, src); |
3718 | } |
3719 | |
3720 | #if IS_ENABLED(CONFIG_IPV6) |
3721 | static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx, |
3722 | struct net_bridge_mcast_port *pmctx, |
3723 | const struct in6_addr *group, |
3724 | __u16 vid, |
3725 | const unsigned char *src) |
3726 | { |
3727 | struct br_ip br_group; |
3728 | struct bridge_mcast_own_query *own_query; |
3729 | |
3730 | if (ipv6_addr_is_ll_all_nodes(addr: group)) |
3731 | return; |
3732 | |
3733 | own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query; |
3734 | |
3735 | memset(&br_group, 0, sizeof(br_group)); |
3736 | br_group.dst.ip6 = *group; |
3737 | br_group.proto = htons(ETH_P_IPV6); |
3738 | br_group.vid = vid; |
3739 | |
3740 | br_multicast_leave_group(brmctx, pmctx, group: &br_group, |
3741 | other_query: &brmctx->ip6_other_query, |
3742 | own_query, src); |
3743 | } |
3744 | #endif |
3745 | |
3746 | static void br_multicast_err_count(const struct net_bridge *br, |
3747 | const struct net_bridge_port *p, |
3748 | __be16 proto) |
3749 | { |
3750 | struct bridge_mcast_stats __percpu *stats; |
3751 | struct bridge_mcast_stats *pstats; |
3752 | |
3753 | if (!br_opt_get(br, opt: BROPT_MULTICAST_STATS_ENABLED)) |
3754 | return; |
3755 | |
3756 | if (p) |
3757 | stats = p->mcast_stats; |
3758 | else |
3759 | stats = br->mcast_stats; |
3760 | if (WARN_ON(!stats)) |
3761 | return; |
3762 | |
3763 | pstats = this_cpu_ptr(stats); |
3764 | |
3765 | u64_stats_update_begin(syncp: &pstats->syncp); |
3766 | switch (proto) { |
3767 | case htons(ETH_P_IP): |
3768 | pstats->mstats.igmp_parse_errors++; |
3769 | break; |
3770 | #if IS_ENABLED(CONFIG_IPV6) |
3771 | case htons(ETH_P_IPV6): |
3772 | pstats->mstats.mld_parse_errors++; |
3773 | break; |
3774 | #endif |
3775 | } |
3776 | u64_stats_update_end(syncp: &pstats->syncp); |
3777 | } |
3778 | |
3779 | static void br_multicast_pim(struct net_bridge_mcast *brmctx, |
3780 | struct net_bridge_mcast_port *pmctx, |
3781 | const struct sk_buff *skb) |
3782 | { |
3783 | unsigned int offset = skb_transport_offset(skb); |
3784 | struct pimhdr *pimhdr, _pimhdr; |
3785 | |
3786 | pimhdr = skb_header_pointer(skb, offset, len: sizeof(_pimhdr), buffer: &_pimhdr); |
3787 | if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION || |
3788 | pim_hdr_type(pimhdr) != PIM_TYPE_HELLO) |
3789 | return; |
3790 | |
3791 | spin_lock(lock: &brmctx->br->multicast_lock); |
3792 | br_ip4_multicast_mark_router(brmctx, pmctx); |
3793 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3794 | } |
3795 | |
3796 | static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx, |
3797 | struct net_bridge_mcast_port *pmctx, |
3798 | struct sk_buff *skb) |
3799 | { |
3800 | if (ip_hdr(skb)->protocol != IPPROTO_IGMP || |
3801 | igmp_hdr(skb)->type != IGMP_MRDISC_ADV) |
3802 | return -ENOMSG; |
3803 | |
3804 | spin_lock(lock: &brmctx->br->multicast_lock); |
3805 | br_ip4_multicast_mark_router(brmctx, pmctx); |
3806 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3807 | |
3808 | return 0; |
3809 | } |
3810 | |
3811 | static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx, |
3812 | struct net_bridge_mcast_port *pmctx, |
3813 | struct sk_buff *skb, |
3814 | u16 vid) |
3815 | { |
3816 | struct net_bridge_port *p = pmctx ? pmctx->port : NULL; |
3817 | const unsigned char *src; |
3818 | struct igmphdr *ih; |
3819 | int err; |
3820 | |
3821 | err = ip_mc_check_igmp(skb); |
3822 | |
3823 | if (err == -ENOMSG) { |
3824 | if (!ipv4_is_local_multicast(addr: ip_hdr(skb)->daddr)) { |
3825 | BR_INPUT_SKB_CB(skb)->mrouters_only = 1; |
3826 | } else if (pim_ipv4_all_pim_routers(addr: ip_hdr(skb)->daddr)) { |
3827 | if (ip_hdr(skb)->protocol == IPPROTO_PIM) |
3828 | br_multicast_pim(brmctx, pmctx, skb); |
3829 | } else if (ipv4_is_all_snoopers(addr: ip_hdr(skb)->daddr)) { |
3830 | br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb); |
3831 | } |
3832 | |
3833 | return 0; |
3834 | } else if (err < 0) { |
3835 | br_multicast_err_count(br: brmctx->br, p, proto: skb->protocol); |
3836 | return err; |
3837 | } |
3838 | |
3839 | ih = igmp_hdr(skb); |
3840 | src = eth_hdr(skb)->h_source; |
3841 | BR_INPUT_SKB_CB(skb)->igmp = ih->type; |
3842 | |
3843 | switch (ih->type) { |
3844 | case IGMP_HOST_MEMBERSHIP_REPORT: |
3845 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
3846 | BR_INPUT_SKB_CB(skb)->mrouters_only = 1; |
3847 | err = br_ip4_multicast_add_group(brmctx, pmctx, group: ih->group, vid, |
3848 | src, igmpv2: true); |
3849 | break; |
3850 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
3851 | err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid); |
3852 | break; |
3853 | case IGMP_HOST_MEMBERSHIP_QUERY: |
3854 | br_ip4_multicast_query(brmctx, pmctx, skb, vid); |
3855 | break; |
3856 | case IGMP_HOST_LEAVE_MESSAGE: |
3857 | br_ip4_multicast_leave_group(brmctx, pmctx, group: ih->group, vid, src); |
3858 | break; |
3859 | } |
3860 | |
3861 | br_multicast_count(br: brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp, |
3862 | dir: BR_MCAST_DIR_RX); |
3863 | |
3864 | return err; |
3865 | } |
3866 | |
3867 | #if IS_ENABLED(CONFIG_IPV6) |
3868 | static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx, |
3869 | struct net_bridge_mcast_port *pmctx, |
3870 | struct sk_buff *skb) |
3871 | { |
3872 | if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV) |
3873 | return; |
3874 | |
3875 | spin_lock(lock: &brmctx->br->multicast_lock); |
3876 | br_ip6_multicast_mark_router(brmctx, pmctx); |
3877 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3878 | } |
3879 | |
3880 | static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx, |
3881 | struct net_bridge_mcast_port *pmctx, |
3882 | struct sk_buff *skb, |
3883 | u16 vid) |
3884 | { |
3885 | struct net_bridge_port *p = pmctx ? pmctx->port : NULL; |
3886 | const unsigned char *src; |
3887 | struct mld_msg *mld; |
3888 | int err; |
3889 | |
3890 | err = ipv6_mc_check_mld(skb); |
3891 | |
3892 | if (err == -ENOMSG || err == -ENODATA) { |
3893 | if (!ipv6_addr_is_ll_all_nodes(addr: &ipv6_hdr(skb)->daddr)) |
3894 | BR_INPUT_SKB_CB(skb)->mrouters_only = 1; |
3895 | if (err == -ENODATA && |
3896 | ipv6_addr_is_all_snoopers(addr: &ipv6_hdr(skb)->daddr)) |
3897 | br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb); |
3898 | |
3899 | return 0; |
3900 | } else if (err < 0) { |
3901 | br_multicast_err_count(br: brmctx->br, p, proto: skb->protocol); |
3902 | return err; |
3903 | } |
3904 | |
3905 | mld = (struct mld_msg *)skb_transport_header(skb); |
3906 | BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type; |
3907 | |
3908 | switch (mld->mld_type) { |
3909 | case ICMPV6_MGM_REPORT: |
3910 | src = eth_hdr(skb)->h_source; |
3911 | BR_INPUT_SKB_CB(skb)->mrouters_only = 1; |
3912 | err = br_ip6_multicast_add_group(brmctx, pmctx, group: &mld->mld_mca, |
3913 | vid, src, mldv1: true); |
3914 | break; |
3915 | case ICMPV6_MLD2_REPORT: |
3916 | err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid); |
3917 | break; |
3918 | case ICMPV6_MGM_QUERY: |
3919 | err = br_ip6_multicast_query(brmctx, pmctx, skb, vid); |
3920 | break; |
3921 | case ICMPV6_MGM_REDUCTION: |
3922 | src = eth_hdr(skb)->h_source; |
3923 | br_ip6_multicast_leave_group(brmctx, pmctx, group: &mld->mld_mca, vid, |
3924 | src); |
3925 | break; |
3926 | } |
3927 | |
3928 | br_multicast_count(br: brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp, |
3929 | dir: BR_MCAST_DIR_RX); |
3930 | |
3931 | return err; |
3932 | } |
3933 | #endif |
3934 | |
3935 | int br_multicast_rcv(struct net_bridge_mcast **brmctx, |
3936 | struct net_bridge_mcast_port **pmctx, |
3937 | struct net_bridge_vlan *vlan, |
3938 | struct sk_buff *skb, u16 vid) |
3939 | { |
3940 | int ret = 0; |
3941 | |
3942 | BR_INPUT_SKB_CB(skb)->igmp = 0; |
3943 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; |
3944 | |
3945 | if (!br_opt_get(br: (*brmctx)->br, opt: BROPT_MULTICAST_ENABLED)) |
3946 | return 0; |
3947 | |
3948 | if (br_opt_get(br: (*brmctx)->br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) { |
3949 | const struct net_bridge_vlan *masterv; |
3950 | |
3951 | /* the vlan has the master flag set only when transmitting |
3952 | * through the bridge device |
3953 | */ |
3954 | if (br_vlan_is_master(v: vlan)) { |
3955 | masterv = vlan; |
3956 | *brmctx = &vlan->br_mcast_ctx; |
3957 | *pmctx = NULL; |
3958 | } else { |
3959 | masterv = vlan->brvlan; |
3960 | *brmctx = &vlan->brvlan->br_mcast_ctx; |
3961 | *pmctx = &vlan->port_mcast_ctx; |
3962 | } |
3963 | |
3964 | if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) |
3965 | return 0; |
3966 | } |
3967 | |
3968 | switch (skb->protocol) { |
3969 | case htons(ETH_P_IP): |
3970 | ret = br_multicast_ipv4_rcv(brmctx: *brmctx, pmctx: *pmctx, skb, vid); |
3971 | break; |
3972 | #if IS_ENABLED(CONFIG_IPV6) |
3973 | case htons(ETH_P_IPV6): |
3974 | ret = br_multicast_ipv6_rcv(brmctx: *brmctx, pmctx: *pmctx, skb, vid); |
3975 | break; |
3976 | #endif |
3977 | } |
3978 | |
3979 | return ret; |
3980 | } |
3981 | |
3982 | static void br_multicast_query_expired(struct net_bridge_mcast *brmctx, |
3983 | struct bridge_mcast_own_query *query, |
3984 | struct bridge_mcast_querier *querier) |
3985 | { |
3986 | spin_lock(lock: &brmctx->br->multicast_lock); |
3987 | if (br_multicast_ctx_vlan_disabled(brmctx)) |
3988 | goto out; |
3989 | |
3990 | if (query->startup_sent < brmctx->multicast_startup_query_count) |
3991 | query->startup_sent++; |
3992 | |
3993 | br_multicast_send_query(brmctx, NULL, own_query: query); |
3994 | out: |
3995 | spin_unlock(lock: &brmctx->br->multicast_lock); |
3996 | } |
3997 | |
3998 | static void br_ip4_multicast_query_expired(struct timer_list *t) |
3999 | { |
4000 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
4001 | ip4_own_query.timer); |
4002 | |
4003 | br_multicast_query_expired(brmctx, query: &brmctx->ip4_own_query, |
4004 | querier: &brmctx->ip4_querier); |
4005 | } |
4006 | |
4007 | #if IS_ENABLED(CONFIG_IPV6) |
4008 | static void br_ip6_multicast_query_expired(struct timer_list *t) |
4009 | { |
4010 | struct net_bridge_mcast *brmctx = from_timer(brmctx, t, |
4011 | ip6_own_query.timer); |
4012 | |
4013 | br_multicast_query_expired(brmctx, query: &brmctx->ip6_own_query, |
4014 | querier: &brmctx->ip6_querier); |
4015 | } |
4016 | #endif |
4017 | |
4018 | static void br_multicast_gc_work(struct work_struct *work) |
4019 | { |
4020 | struct net_bridge *br = container_of(work, struct net_bridge, |
4021 | mcast_gc_work); |
4022 | HLIST_HEAD(deleted_head); |
4023 | |
4024 | spin_lock_bh(lock: &br->multicast_lock); |
4025 | hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head); |
4026 | spin_unlock_bh(lock: &br->multicast_lock); |
4027 | |
4028 | br_multicast_gc(head: &deleted_head); |
4029 | } |
4030 | |
4031 | void br_multicast_ctx_init(struct net_bridge *br, |
4032 | struct net_bridge_vlan *vlan, |
4033 | struct net_bridge_mcast *brmctx) |
4034 | { |
4035 | brmctx->br = br; |
4036 | brmctx->vlan = vlan; |
4037 | brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; |
4038 | brmctx->multicast_last_member_count = 2; |
4039 | brmctx->multicast_startup_query_count = 2; |
4040 | |
4041 | brmctx->multicast_last_member_interval = HZ; |
4042 | brmctx->multicast_query_response_interval = 10 * HZ; |
4043 | brmctx->multicast_startup_query_interval = 125 * HZ / 4; |
4044 | brmctx->multicast_query_interval = 125 * HZ; |
4045 | brmctx->multicast_querier_interval = 255 * HZ; |
4046 | brmctx->multicast_membership_interval = 260 * HZ; |
4047 | |
4048 | brmctx->ip4_querier.port_ifidx = 0; |
4049 | seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock); |
4050 | brmctx->multicast_igmp_version = 2; |
4051 | #if IS_ENABLED(CONFIG_IPV6) |
4052 | brmctx->multicast_mld_version = 1; |
4053 | brmctx->ip6_querier.port_ifidx = 0; |
4054 | seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock); |
4055 | #endif |
4056 | |
4057 | timer_setup(&brmctx->ip4_mc_router_timer, |
4058 | br_ip4_multicast_local_router_expired, 0); |
4059 | timer_setup(&brmctx->ip4_other_query.timer, |
4060 | br_ip4_multicast_querier_expired, 0); |
4061 | timer_setup(&brmctx->ip4_other_query.delay_timer, |
4062 | br_multicast_query_delay_expired, 0); |
4063 | timer_setup(&brmctx->ip4_own_query.timer, |
4064 | br_ip4_multicast_query_expired, 0); |
4065 | #if IS_ENABLED(CONFIG_IPV6) |
4066 | timer_setup(&brmctx->ip6_mc_router_timer, |
4067 | br_ip6_multicast_local_router_expired, 0); |
4068 | timer_setup(&brmctx->ip6_other_query.timer, |
4069 | br_ip6_multicast_querier_expired, 0); |
4070 | timer_setup(&brmctx->ip6_other_query.delay_timer, |
4071 | br_multicast_query_delay_expired, 0); |
4072 | timer_setup(&brmctx->ip6_own_query.timer, |
4073 | br_ip6_multicast_query_expired, 0); |
4074 | #endif |
4075 | } |
4076 | |
/* Tear down a multicast context by stopping all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
4081 | |
4082 | void br_multicast_init(struct net_bridge *br) |
4083 | { |
4084 | br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX; |
4085 | |
4086 | br_multicast_ctx_init(br, NULL, brmctx: &br->multicast_ctx); |
4087 | |
4088 | br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: true); |
4089 | br_opt_toggle(br, opt: BROPT_HAS_IPV6_ADDR, on: true); |
4090 | |
4091 | spin_lock_init(&br->multicast_lock); |
4092 | INIT_HLIST_HEAD(&br->mdb_list); |
4093 | INIT_HLIST_HEAD(&br->mcast_gc_list); |
4094 | INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work); |
4095 | } |
4096 | |
4097 | static void br_ip4_multicast_join_snoopers(struct net_bridge *br) |
4098 | { |
4099 | struct in_device *in_dev = in_dev_get(dev: br->dev); |
4100 | |
4101 | if (!in_dev) |
4102 | return; |
4103 | |
4104 | __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); |
4105 | in_dev_put(idev: in_dev); |
4106 | } |
4107 | |
4108 | #if IS_ENABLED(CONFIG_IPV6) |
4109 | static void br_ip6_multicast_join_snoopers(struct net_bridge *br) |
4110 | { |
4111 | struct in6_addr addr; |
4112 | |
4113 | ipv6_addr_set(addr: &addr, htonl(0xff020000), w2: 0, w3: 0, htonl(0x6a)); |
4114 | ipv6_dev_mc_inc(dev: br->dev, addr: &addr); |
4115 | } |
4116 | #else |
4117 | static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) |
4118 | { |
4119 | } |
4120 | #endif |
4121 | |
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
4127 | |
4128 | static void br_ip4_multicast_leave_snoopers(struct net_bridge *br) |
4129 | { |
4130 | struct in_device *in_dev = in_dev_get(dev: br->dev); |
4131 | |
4132 | if (WARN_ON(!in_dev)) |
4133 | return; |
4134 | |
4135 | __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); |
4136 | in_dev_put(idev: in_dev); |
4137 | } |
4138 | |
4139 | #if IS_ENABLED(CONFIG_IPV6) |
4140 | static void br_ip6_multicast_leave_snoopers(struct net_bridge *br) |
4141 | { |
4142 | struct in6_addr addr; |
4143 | |
4144 | ipv6_addr_set(addr: &addr, htonl(0xff020000), w2: 0, w3: 0, htonl(0x6a)); |
4145 | ipv6_dev_mc_dec(dev: br->dev, addr: &addr); |
4146 | } |
4147 | #else |
4148 | static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) |
4149 | { |
4150 | } |
4151 | #endif |
4152 | |
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
4158 | |
4159 | static void __br_multicast_open_query(struct net_bridge *br, |
4160 | struct bridge_mcast_own_query *query) |
4161 | { |
4162 | query->startup_sent = 0; |
4163 | |
4164 | if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED)) |
4165 | return; |
4166 | |
4167 | mod_timer(timer: &query->timer, expires: jiffies); |
4168 | } |
4169 | |
4170 | static void __br_multicast_open(struct net_bridge_mcast *brmctx) |
4171 | { |
4172 | __br_multicast_open_query(br: brmctx->br, query: &brmctx->ip4_own_query); |
4173 | #if IS_ENABLED(CONFIG_IPV6) |
4174 | __br_multicast_open_query(br: brmctx->br, query: &brmctx->ip6_own_query); |
4175 | #endif |
4176 | } |
4177 | |
4178 | void br_multicast_open(struct net_bridge *br) |
4179 | { |
4180 | ASSERT_RTNL(); |
4181 | |
4182 | if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { |
4183 | struct net_bridge_vlan_group *vg; |
4184 | struct net_bridge_vlan *vlan; |
4185 | |
4186 | vg = br_vlan_group(br); |
4187 | if (vg) { |
4188 | list_for_each_entry(vlan, &vg->vlan_list, vlist) { |
4189 | struct net_bridge_mcast *brmctx; |
4190 | |
4191 | brmctx = &vlan->br_mcast_ctx; |
4192 | if (br_vlan_is_brentry(v: vlan) && |
4193 | !br_multicast_ctx_vlan_disabled(brmctx)) |
4194 | __br_multicast_open(brmctx: &vlan->br_mcast_ctx); |
4195 | } |
4196 | } |
4197 | } else { |
4198 | __br_multicast_open(brmctx: &br->multicast_ctx); |
4199 | } |
4200 | } |
4201 | |
4202 | static void __br_multicast_stop(struct net_bridge_mcast *brmctx) |
4203 | { |
4204 | del_timer_sync(timer: &brmctx->ip4_mc_router_timer); |
4205 | del_timer_sync(timer: &brmctx->ip4_other_query.timer); |
4206 | del_timer_sync(timer: &brmctx->ip4_other_query.delay_timer); |
4207 | del_timer_sync(timer: &brmctx->ip4_own_query.timer); |
4208 | #if IS_ENABLED(CONFIG_IPV6) |
4209 | del_timer_sync(timer: &brmctx->ip6_mc_router_timer); |
4210 | del_timer_sync(timer: &brmctx->ip6_other_query.timer); |
4211 | del_timer_sync(timer: &brmctx->ip6_other_query.delay_timer); |
4212 | del_timer_sync(timer: &brmctx->ip6_own_query.timer); |
4213 | #endif |
4214 | } |
4215 | |
4216 | void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on) |
4217 | { |
4218 | struct net_bridge *br; |
4219 | |
4220 | /* it's okay to check for the flag without the multicast lock because it |
4221 | * can only change under RTNL -> multicast_lock, we need the latter to |
4222 | * sync with timers and packets |
4223 | */ |
4224 | if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) |
4225 | return; |
4226 | |
4227 | if (br_vlan_is_master(v: vlan)) { |
4228 | br = vlan->br; |
4229 | |
4230 | if (!br_vlan_is_brentry(v: vlan) || |
4231 | (on && |
4232 | br_multicast_ctx_vlan_global_disabled(brmctx: &vlan->br_mcast_ctx))) |
4233 | return; |
4234 | |
4235 | spin_lock_bh(lock: &br->multicast_lock); |
4236 | vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED; |
4237 | spin_unlock_bh(lock: &br->multicast_lock); |
4238 | |
4239 | if (on) |
4240 | __br_multicast_open(brmctx: &vlan->br_mcast_ctx); |
4241 | else |
4242 | __br_multicast_stop(brmctx: &vlan->br_mcast_ctx); |
4243 | } else { |
4244 | struct net_bridge_mcast *brmctx; |
4245 | |
4246 | brmctx = br_multicast_port_ctx_get_global(pmctx: &vlan->port_mcast_ctx); |
4247 | if (on && br_multicast_ctx_vlan_global_disabled(brmctx)) |
4248 | return; |
4249 | |
4250 | br = vlan->port->br; |
4251 | spin_lock_bh(lock: &br->multicast_lock); |
4252 | vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED; |
4253 | if (on) |
4254 | __br_multicast_enable_port_ctx(pmctx: &vlan->port_mcast_ctx); |
4255 | else |
4256 | __br_multicast_disable_port_ctx(pmctx: &vlan->port_mcast_ctx); |
4257 | spin_unlock_bh(lock: &br->multicast_lock); |
4258 | } |
4259 | } |
4260 | |
4261 | static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on) |
4262 | { |
4263 | struct net_bridge_port *p; |
4264 | |
4265 | if (WARN_ON_ONCE(!br_vlan_is_master(vlan))) |
4266 | return; |
4267 | |
4268 | list_for_each_entry(p, &vlan->br->port_list, list) { |
4269 | struct net_bridge_vlan *vport; |
4270 | |
4271 | vport = br_vlan_find(vg: nbp_vlan_group(p), vid: vlan->vid); |
4272 | if (!vport) |
4273 | continue; |
4274 | br_multicast_toggle_one_vlan(vlan: vport, on); |
4275 | } |
4276 | |
4277 | if (br_vlan_is_brentry(v: vlan)) |
4278 | br_multicast_toggle_one_vlan(vlan, on); |
4279 | } |
4280 | |
4281 | int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, |
4282 | struct netlink_ext_ack *extack) |
4283 | { |
4284 | struct net_bridge_vlan_group *vg; |
4285 | struct net_bridge_vlan *vlan; |
4286 | struct net_bridge_port *p; |
4287 | |
4288 | if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on) |
4289 | return 0; |
4290 | |
4291 | if (on && !br_opt_get(br, opt: BROPT_VLAN_ENABLED)) { |
4292 | NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled"); |
4293 | return -EINVAL; |
4294 | } |
4295 | |
4296 | vg = br_vlan_group(br); |
4297 | if (!vg) |
4298 | return 0; |
4299 | |
4300 | br_opt_toggle(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED, on); |
4301 | |
4302 | /* disable/enable non-vlan mcast contexts based on vlan snooping */ |
4303 | if (on) |
4304 | __br_multicast_stop(brmctx: &br->multicast_ctx); |
4305 | else |
4306 | __br_multicast_open(brmctx: &br->multicast_ctx); |
4307 | list_for_each_entry(p, &br->port_list, list) { |
4308 | if (on) |
4309 | br_multicast_disable_port(port: p); |
4310 | else |
4311 | br_multicast_enable_port(port: p); |
4312 | } |
4313 | |
4314 | list_for_each_entry(vlan, &vg->vlan_list, vlist) |
4315 | br_multicast_toggle_vlan(vlan, on); |
4316 | |
4317 | return 0; |
4318 | } |
4319 | |
4320 | bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on) |
4321 | { |
4322 | ASSERT_RTNL(); |
4323 | |
4324 | /* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and |
4325 | * requires only RTNL to change |
4326 | */ |
4327 | if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) |
4328 | return false; |
4329 | |
4330 | vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED; |
4331 | br_multicast_toggle_vlan(vlan, on); |
4332 | |
4333 | return true; |
4334 | } |
4335 | |
4336 | void br_multicast_stop(struct net_bridge *br) |
4337 | { |
4338 | ASSERT_RTNL(); |
4339 | |
4340 | if (br_opt_get(br, opt: BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { |
4341 | struct net_bridge_vlan_group *vg; |
4342 | struct net_bridge_vlan *vlan; |
4343 | |
4344 | vg = br_vlan_group(br); |
4345 | if (vg) { |
4346 | list_for_each_entry(vlan, &vg->vlan_list, vlist) { |
4347 | struct net_bridge_mcast *brmctx; |
4348 | |
4349 | brmctx = &vlan->br_mcast_ctx; |
4350 | if (br_vlan_is_brentry(v: vlan) && |
4351 | !br_multicast_ctx_vlan_disabled(brmctx)) |
4352 | __br_multicast_stop(brmctx: &vlan->br_mcast_ctx); |
4353 | } |
4354 | } |
4355 | } else { |
4356 | __br_multicast_stop(brmctx: &br->multicast_ctx); |
4357 | } |
4358 | } |
4359 | |
4360 | void br_multicast_dev_del(struct net_bridge *br) |
4361 | { |
4362 | struct net_bridge_mdb_entry *mp; |
4363 | HLIST_HEAD(deleted_head); |
4364 | struct hlist_node *tmp; |
4365 | |
4366 | spin_lock_bh(lock: &br->multicast_lock); |
4367 | hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) |
4368 | br_multicast_del_mdb_entry(mp); |
4369 | hlist_move_list(old: &br->mcast_gc_list, new: &deleted_head); |
4370 | spin_unlock_bh(lock: &br->multicast_lock); |
4371 | |
4372 | br_multicast_ctx_deinit(brmctx: &br->multicast_ctx); |
4373 | br_multicast_gc(head: &deleted_head); |
4374 | cancel_work_sync(work: &br->mcast_gc_work); |
4375 | |
4376 | rcu_barrier(); |
4377 | } |
4378 | |
4379 | int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val) |
4380 | { |
4381 | int err = -EINVAL; |
4382 | |
4383 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4384 | |
4385 | switch (val) { |
4386 | case MDB_RTR_TYPE_DISABLED: |
4387 | case MDB_RTR_TYPE_PERM: |
4388 | br_mc_router_state_change(p: brmctx->br, is_mc_router: val == MDB_RTR_TYPE_PERM); |
4389 | del_timer(timer: &brmctx->ip4_mc_router_timer); |
4390 | #if IS_ENABLED(CONFIG_IPV6) |
4391 | del_timer(timer: &brmctx->ip6_mc_router_timer); |
4392 | #endif |
4393 | brmctx->multicast_router = val; |
4394 | err = 0; |
4395 | break; |
4396 | case MDB_RTR_TYPE_TEMP_QUERY: |
4397 | if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) |
4398 | br_mc_router_state_change(p: brmctx->br, is_mc_router: false); |
4399 | brmctx->multicast_router = val; |
4400 | err = 0; |
4401 | break; |
4402 | } |
4403 | |
4404 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4405 | |
4406 | return err; |
4407 | } |
4408 | |
4409 | static void |
4410 | br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted) |
4411 | { |
4412 | if (!deleted) |
4413 | return; |
4414 | |
4415 | /* For backwards compatibility for now, only notify if there is |
4416 | * no multicast router anymore for both IPv4 and IPv6. |
4417 | */ |
4418 | if (!hlist_unhashed(h: &pmctx->ip4_rlist)) |
4419 | return; |
4420 | #if IS_ENABLED(CONFIG_IPV6) |
4421 | if (!hlist_unhashed(h: &pmctx->ip6_rlist)) |
4422 | return; |
4423 | #endif |
4424 | |
4425 | br_rtr_notify(dev: pmctx->port->br->dev, pmctx, RTM_DELMDB); |
4426 | br_port_mc_router_state_change(p: pmctx->port, is_mc_router: false); |
4427 | |
4428 | /* don't allow timer refresh */ |
4429 | if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) |
4430 | pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; |
4431 | } |
4432 | |
4433 | int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, |
4434 | unsigned long val) |
4435 | { |
4436 | struct net_bridge_mcast *brmctx; |
4437 | unsigned long now = jiffies; |
4438 | int err = -EINVAL; |
4439 | bool del = false; |
4440 | |
4441 | brmctx = br_multicast_port_ctx_get_global(pmctx); |
4442 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4443 | if (pmctx->multicast_router == val) { |
4444 | /* Refresh the temp router port timer */ |
4445 | if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) { |
4446 | mod_timer(timer: &pmctx->ip4_mc_router_timer, |
4447 | expires: now + brmctx->multicast_querier_interval); |
4448 | #if IS_ENABLED(CONFIG_IPV6) |
4449 | mod_timer(timer: &pmctx->ip6_mc_router_timer, |
4450 | expires: now + brmctx->multicast_querier_interval); |
4451 | #endif |
4452 | } |
4453 | err = 0; |
4454 | goto unlock; |
4455 | } |
4456 | switch (val) { |
4457 | case MDB_RTR_TYPE_DISABLED: |
4458 | pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; |
4459 | del |= br_ip4_multicast_rport_del(pmctx); |
4460 | del_timer(timer: &pmctx->ip4_mc_router_timer); |
4461 | del |= br_ip6_multicast_rport_del(pmctx); |
4462 | #if IS_ENABLED(CONFIG_IPV6) |
4463 | del_timer(timer: &pmctx->ip6_mc_router_timer); |
4464 | #endif |
4465 | br_multicast_rport_del_notify(pmctx, deleted: del); |
4466 | break; |
4467 | case MDB_RTR_TYPE_TEMP_QUERY: |
4468 | pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; |
4469 | del |= br_ip4_multicast_rport_del(pmctx); |
4470 | del |= br_ip6_multicast_rport_del(pmctx); |
4471 | br_multicast_rport_del_notify(pmctx, deleted: del); |
4472 | break; |
4473 | case MDB_RTR_TYPE_PERM: |
4474 | pmctx->multicast_router = MDB_RTR_TYPE_PERM; |
4475 | del_timer(timer: &pmctx->ip4_mc_router_timer); |
4476 | br_ip4_multicast_add_router(brmctx, pmctx); |
4477 | #if IS_ENABLED(CONFIG_IPV6) |
4478 | del_timer(timer: &pmctx->ip6_mc_router_timer); |
4479 | #endif |
4480 | br_ip6_multicast_add_router(brmctx, pmctx); |
4481 | break; |
4482 | case MDB_RTR_TYPE_TEMP: |
4483 | pmctx->multicast_router = MDB_RTR_TYPE_TEMP; |
4484 | br_ip4_multicast_mark_router(brmctx, pmctx); |
4485 | br_ip6_multicast_mark_router(brmctx, pmctx); |
4486 | break; |
4487 | default: |
4488 | goto unlock; |
4489 | } |
4490 | err = 0; |
4491 | unlock: |
4492 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4493 | |
4494 | return err; |
4495 | } |
4496 | |
4497 | int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) |
4498 | { |
4499 | int err; |
4500 | |
4501 | if (br_vlan_is_master(v)) |
4502 | err = br_multicast_set_router(brmctx: &v->br_mcast_ctx, val: mcast_router); |
4503 | else |
4504 | err = br_multicast_set_port_router(pmctx: &v->port_mcast_ctx, |
4505 | val: mcast_router); |
4506 | |
4507 | return err; |
4508 | } |
4509 | |
4510 | static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, |
4511 | struct bridge_mcast_own_query *query) |
4512 | { |
4513 | struct net_bridge_port *port; |
4514 | |
4515 | if (!br_multicast_ctx_matches_vlan_snooping(brmctx)) |
4516 | return; |
4517 | |
4518 | __br_multicast_open_query(br: brmctx->br, query); |
4519 | |
4520 | rcu_read_lock(); |
4521 | list_for_each_entry_rcu(port, &brmctx->br->port_list, list) { |
4522 | struct bridge_mcast_own_query *ip4_own_query; |
4523 | #if IS_ENABLED(CONFIG_IPV6) |
4524 | struct bridge_mcast_own_query *ip6_own_query; |
4525 | #endif |
4526 | |
4527 | if (br_multicast_port_ctx_state_stopped(pmctx: &port->multicast_ctx)) |
4528 | continue; |
4529 | |
4530 | if (br_multicast_ctx_is_vlan(brmctx)) { |
4531 | struct net_bridge_vlan *vlan; |
4532 | |
4533 | vlan = br_vlan_find(vg: nbp_vlan_group_rcu(p: port), |
4534 | vid: brmctx->vlan->vid); |
4535 | if (!vlan || |
4536 | br_multicast_port_ctx_state_stopped(pmctx: &vlan->port_mcast_ctx)) |
4537 | continue; |
4538 | |
4539 | ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query; |
4540 | #if IS_ENABLED(CONFIG_IPV6) |
4541 | ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query; |
4542 | #endif |
4543 | } else { |
4544 | ip4_own_query = &port->multicast_ctx.ip4_own_query; |
4545 | #if IS_ENABLED(CONFIG_IPV6) |
4546 | ip6_own_query = &port->multicast_ctx.ip6_own_query; |
4547 | #endif |
4548 | } |
4549 | |
4550 | if (query == &brmctx->ip4_own_query) |
4551 | br_multicast_enable(query: ip4_own_query); |
4552 | #if IS_ENABLED(CONFIG_IPV6) |
4553 | else |
4554 | br_multicast_enable(query: ip6_own_query); |
4555 | #endif |
4556 | } |
4557 | rcu_read_unlock(); |
4558 | } |
4559 | |
4560 | int br_multicast_toggle(struct net_bridge *br, unsigned long val, |
4561 | struct netlink_ext_ack *extack) |
4562 | { |
4563 | struct net_bridge_port *port; |
4564 | bool change_snoopers = false; |
4565 | int err = 0; |
4566 | |
4567 | spin_lock_bh(lock: &br->multicast_lock); |
4568 | if (!!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED) == !!val) |
4569 | goto unlock; |
4570 | |
4571 | err = br_mc_disabled_update(dev: br->dev, value: val, extack); |
4572 | if (err == -EOPNOTSUPP) |
4573 | err = 0; |
4574 | if (err) |
4575 | goto unlock; |
4576 | |
4577 | br_opt_toggle(br, opt: BROPT_MULTICAST_ENABLED, on: !!val); |
4578 | if (!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED)) { |
4579 | change_snoopers = true; |
4580 | goto unlock; |
4581 | } |
4582 | |
4583 | if (!netif_running(dev: br->dev)) |
4584 | goto unlock; |
4585 | |
4586 | br_multicast_open(br); |
4587 | list_for_each_entry(port, &br->port_list, list) |
4588 | __br_multicast_enable_port_ctx(pmctx: &port->multicast_ctx); |
4589 | |
4590 | change_snoopers = true; |
4591 | |
4592 | unlock: |
4593 | spin_unlock_bh(lock: &br->multicast_lock); |
4594 | |
4595 | /* br_multicast_join_snoopers has the potential to cause |
4596 | * an MLD Report/Leave to be delivered to br_multicast_rcv, |
4597 | * which would in turn call br_multicast_add_group, which would |
4598 | * attempt to acquire multicast_lock. This function should be |
4599 | * called after the lock has been released to avoid deadlocks on |
4600 | * multicast_lock. |
4601 | * |
4602 | * br_multicast_leave_snoopers does not have the problem since |
4603 | * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and |
4604 | * returns without calling br_multicast_ipv4/6_rcv if it's not |
4605 | * enabled. Moved both functions out just for symmetry. |
4606 | */ |
4607 | if (change_snoopers) { |
4608 | if (br_opt_get(br, opt: BROPT_MULTICAST_ENABLED)) |
4609 | br_multicast_join_snoopers(br); |
4610 | else |
4611 | br_multicast_leave_snoopers(br); |
4612 | } |
4613 | |
4614 | return err; |
4615 | } |
4616 | |
4617 | bool br_multicast_enabled(const struct net_device *dev) |
4618 | { |
4619 | struct net_bridge *br = netdev_priv(dev); |
4620 | |
4621 | return !!br_opt_get(br, opt: BROPT_MULTICAST_ENABLED); |
4622 | } |
4623 | EXPORT_SYMBOL_GPL(br_multicast_enabled); |
4624 | |
4625 | bool br_multicast_router(const struct net_device *dev) |
4626 | { |
4627 | struct net_bridge *br = netdev_priv(dev); |
4628 | bool is_router; |
4629 | |
4630 | spin_lock_bh(lock: &br->multicast_lock); |
4631 | is_router = br_multicast_is_router(brmctx: &br->multicast_ctx, NULL); |
4632 | spin_unlock_bh(lock: &br->multicast_lock); |
4633 | return is_router; |
4634 | } |
4635 | EXPORT_SYMBOL_GPL(br_multicast_router); |
4636 | |
4637 | int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val) |
4638 | { |
4639 | unsigned long max_delay; |
4640 | |
4641 | val = !!val; |
4642 | |
4643 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4644 | if (brmctx->multicast_querier == val) |
4645 | goto unlock; |
4646 | |
4647 | WRITE_ONCE(brmctx->multicast_querier, val); |
4648 | if (!val) |
4649 | goto unlock; |
4650 | |
4651 | max_delay = brmctx->multicast_query_response_interval; |
4652 | |
4653 | if (!timer_pending(timer: &brmctx->ip4_other_query.timer)) |
4654 | mod_timer(timer: &brmctx->ip4_other_query.delay_timer, |
4655 | expires: jiffies + max_delay); |
4656 | |
4657 | br_multicast_start_querier(brmctx, query: &brmctx->ip4_own_query); |
4658 | |
4659 | #if IS_ENABLED(CONFIG_IPV6) |
4660 | if (!timer_pending(timer: &brmctx->ip6_other_query.timer)) |
4661 | mod_timer(timer: &brmctx->ip6_other_query.delay_timer, |
4662 | expires: jiffies + max_delay); |
4663 | |
4664 | br_multicast_start_querier(brmctx, query: &brmctx->ip6_own_query); |
4665 | #endif |
4666 | |
4667 | unlock: |
4668 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4669 | |
4670 | return 0; |
4671 | } |
4672 | |
4673 | int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx, |
4674 | unsigned long val) |
4675 | { |
4676 | /* Currently we support only version 2 and 3 */ |
4677 | switch (val) { |
4678 | case 2: |
4679 | case 3: |
4680 | break; |
4681 | default: |
4682 | return -EINVAL; |
4683 | } |
4684 | |
4685 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4686 | brmctx->multicast_igmp_version = val; |
4687 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4688 | |
4689 | return 0; |
4690 | } |
4691 | |
4692 | #if IS_ENABLED(CONFIG_IPV6) |
4693 | int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx, |
4694 | unsigned long val) |
4695 | { |
4696 | /* Currently we support version 1 and 2 */ |
4697 | switch (val) { |
4698 | case 1: |
4699 | case 2: |
4700 | break; |
4701 | default: |
4702 | return -EINVAL; |
4703 | } |
4704 | |
4705 | spin_lock_bh(lock: &brmctx->br->multicast_lock); |
4706 | brmctx->multicast_mld_version = val; |
4707 | spin_unlock_bh(lock: &brmctx->br->multicast_lock); |
4708 | |
4709 | return 0; |
4710 | } |
4711 | #endif |
4712 | |
4713 | void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, |
4714 | unsigned long val) |
4715 | { |
4716 | unsigned long intvl_jiffies = clock_t_to_jiffies(x: val); |
4717 | |
4718 | if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) { |
4719 | br_info(brmctx->br, |
4720 | "trying to set multicast query interval below minimum, setting to %lu (%ums)\n", |
4721 | jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN), |
4722 | jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN)); |
4723 | intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; |
4724 | } |
4725 | |
4726 | brmctx->multicast_query_interval = intvl_jiffies; |
4727 | } |
4728 | |
4729 | void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, |
4730 | unsigned long val) |
4731 | { |
4732 | unsigned long intvl_jiffies = clock_t_to_jiffies(x: val); |
4733 | |
4734 | if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) { |
4735 | br_info(brmctx->br, |
4736 | "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n", |
4737 | jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), |
4738 | jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); |
4739 | intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; |
4740 | } |
4741 | |
4742 | brmctx->multicast_startup_query_interval = intvl_jiffies; |
4743 | } |
4744 | |
4745 | /** |
4746 | * br_multicast_list_adjacent - Returns snooped multicast addresses |
4747 | * @dev: The bridge port adjacent to which to retrieve addresses |
4748 | * @br_ip_list: The list to store found, snooped multicast IP addresses in |
4749 | * |
4750 | * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast |
4751 | * snooping feature on all bridge ports of dev's bridge device, excluding |
4752 | * the addresses from dev itself. |
4753 | * |
4754 | * Returns the number of items added to br_ip_list. |
4755 | * |
4756 | * Notes: |
4757 | * - br_ip_list needs to be initialized by caller |
4758 | * - br_ip_list might contain duplicates in the end |
4759 | * (needs to be taken care of by caller) |
4760 | * - br_ip_list needs to be freed by caller |
4761 | */ |
4762 | int br_multicast_list_adjacent(struct net_device *dev, |
4763 | struct list_head *br_ip_list) |
4764 | { |
4765 | struct net_bridge *br; |
4766 | struct net_bridge_port *port; |
4767 | struct net_bridge_port_group *group; |
4768 | struct br_ip_list *entry; |
4769 | int count = 0; |
4770 | |
4771 | rcu_read_lock(); |
4772 | if (!br_ip_list || !netif_is_bridge_port(dev)) |
4773 | goto unlock; |
4774 | |
4775 | port = br_port_get_rcu(dev); |
4776 | if (!port || !port->br) |
4777 | goto unlock; |
4778 | |
4779 | br = port->br; |
4780 | |
4781 | list_for_each_entry_rcu(port, &br->port_list, list) { |
4782 | if (!port->dev || port->dev == dev) |
4783 | continue; |
4784 | |
4785 | hlist_for_each_entry_rcu(group, &port->mglist, mglist) { |
4786 | entry = kmalloc(size: sizeof(*entry), GFP_ATOMIC); |
4787 | if (!entry) |
4788 | goto unlock; |
4789 | |
4790 | entry->addr = group->key.addr; |
4791 | list_add(new: &entry->list, head: br_ip_list); |
4792 | count++; |
4793 | } |
4794 | } |
4795 | |
4796 | unlock: |
4797 | rcu_read_unlock(); |
4798 | return count; |
4799 | } |
4800 | EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); |
4801 | |
4802 | /** |
4803 | * br_multicast_has_querier_anywhere - Checks for a querier on a bridge |
4804 | * @dev: The bridge port providing the bridge on which to check for a querier |
4805 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4806 | * |
4807 | * Checks whether the given interface has a bridge on top and if so returns |
4808 | * true if a valid querier exists anywhere on the bridged link layer. |
4809 | * Otherwise returns false. |
4810 | */ |
4811 | bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) |
4812 | { |
4813 | struct net_bridge *br; |
4814 | struct net_bridge_port *port; |
4815 | struct ethhdr eth; |
4816 | bool ret = false; |
4817 | |
4818 | rcu_read_lock(); |
4819 | if (!netif_is_bridge_port(dev)) |
4820 | goto unlock; |
4821 | |
4822 | port = br_port_get_rcu(dev); |
4823 | if (!port || !port->br) |
4824 | goto unlock; |
4825 | |
4826 | br = port->br; |
4827 | |
4828 | memset(ð, 0, sizeof(eth)); |
4829 | eth.h_proto = htons(proto); |
4830 | |
4831 | ret = br_multicast_querier_exists(brmctx: &br->multicast_ctx, eth: ð, NULL); |
4832 | |
4833 | unlock: |
4834 | rcu_read_unlock(); |
4835 | return ret; |
4836 | } |
4837 | EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); |
4838 | |
4839 | /** |
4840 | * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port |
4841 | * @dev: The bridge port adjacent to which to check for a querier |
4842 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4843 | * |
4844 | * Checks whether the given interface has a bridge on top and if so returns |
4845 | * true if a selected querier is behind one of the other ports of this |
4846 | * bridge. Otherwise returns false. |
4847 | */ |
4848 | bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) |
4849 | { |
4850 | struct net_bridge_mcast *brmctx; |
4851 | struct net_bridge *br; |
4852 | struct net_bridge_port *port; |
4853 | bool ret = false; |
4854 | int port_ifidx; |
4855 | |
4856 | rcu_read_lock(); |
4857 | if (!netif_is_bridge_port(dev)) |
4858 | goto unlock; |
4859 | |
4860 | port = br_port_get_rcu(dev); |
4861 | if (!port || !port->br) |
4862 | goto unlock; |
4863 | |
4864 | br = port->br; |
4865 | brmctx = &br->multicast_ctx; |
4866 | |
4867 | switch (proto) { |
4868 | case ETH_P_IP: |
4869 | port_ifidx = brmctx->ip4_querier.port_ifidx; |
4870 | if (!timer_pending(timer: &brmctx->ip4_other_query.timer) || |
4871 | port_ifidx == port->dev->ifindex) |
4872 | goto unlock; |
4873 | break; |
4874 | #if IS_ENABLED(CONFIG_IPV6) |
4875 | case ETH_P_IPV6: |
4876 | port_ifidx = brmctx->ip6_querier.port_ifidx; |
4877 | if (!timer_pending(timer: &brmctx->ip6_other_query.timer) || |
4878 | port_ifidx == port->dev->ifindex) |
4879 | goto unlock; |
4880 | break; |
4881 | #endif |
4882 | default: |
4883 | goto unlock; |
4884 | } |
4885 | |
4886 | ret = true; |
4887 | unlock: |
4888 | rcu_read_unlock(); |
4889 | return ret; |
4890 | } |
4891 | EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); |
4892 | |
4893 | /** |
4894 | * br_multicast_has_router_adjacent - Checks for a router behind a bridge port |
4895 | * @dev: The bridge port adjacent to which to check for a multicast router |
4896 | * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 |
4897 | * |
4898 | * Checks whether the given interface has a bridge on top and if so returns |
4899 | * true if a multicast router is behind one of the other ports of this |
4900 | * bridge. Otherwise returns false. |
4901 | */ |
4902 | bool br_multicast_has_router_adjacent(struct net_device *dev, int proto) |
4903 | { |
4904 | struct net_bridge_mcast_port *pmctx; |
4905 | struct net_bridge_mcast *brmctx; |
4906 | struct net_bridge_port *port; |
4907 | bool ret = false; |
4908 | |
4909 | rcu_read_lock(); |
4910 | port = br_port_get_check_rcu(dev); |
4911 | if (!port) |
4912 | goto unlock; |
4913 | |
4914 | brmctx = &port->br->multicast_ctx; |
4915 | switch (proto) { |
4916 | case ETH_P_IP: |
4917 | hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list, |
4918 | ip4_rlist) { |
4919 | if (pmctx->port == port) |
4920 | continue; |
4921 | |
4922 | ret = true; |
4923 | goto unlock; |
4924 | } |
4925 | break; |
4926 | #if IS_ENABLED(CONFIG_IPV6) |
4927 | case ETH_P_IPV6: |
4928 | hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list, |
4929 | ip6_rlist) { |
4930 | if (pmctx->port == port) |
4931 | continue; |
4932 | |
4933 | ret = true; |
4934 | goto unlock; |
4935 | } |
4936 | break; |
4937 | #endif |
4938 | default: |
4939 | /* when compiled without IPv6 support, be conservative and |
4940 | * always assume presence of an IPv6 multicast router |
4941 | */ |
4942 | ret = true; |
4943 | } |
4944 | |
4945 | unlock: |
4946 | rcu_read_unlock(); |
4947 | return ret; |
4948 | } |
4949 | EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent); |
4950 | |
4951 | static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, |
4952 | const struct sk_buff *skb, u8 type, u8 dir) |
4953 | { |
4954 | struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); |
4955 | __be16 proto = skb->protocol; |
4956 | unsigned int t_len; |
4957 | |
4958 | u64_stats_update_begin(syncp: &pstats->syncp); |
4959 | switch (proto) { |
4960 | case htons(ETH_P_IP): |
4961 | t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); |
4962 | switch (type) { |
4963 | case IGMP_HOST_MEMBERSHIP_REPORT: |
4964 | pstats->mstats.igmp_v1reports[dir]++; |
4965 | break; |
4966 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
4967 | pstats->mstats.igmp_v2reports[dir]++; |
4968 | break; |
4969 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
4970 | pstats->mstats.igmp_v3reports[dir]++; |
4971 | break; |
4972 | case IGMP_HOST_MEMBERSHIP_QUERY: |
4973 | if (t_len != sizeof(struct igmphdr)) { |
4974 | pstats->mstats.igmp_v3queries[dir]++; |
4975 | } else { |
4976 | unsigned int offset = skb_transport_offset(skb); |
4977 | struct igmphdr *ih, _ihdr; |
4978 | |
4979 | ih = skb_header_pointer(skb, offset, |
4980 | len: sizeof(_ihdr), buffer: &_ihdr); |
4981 | if (!ih) |
4982 | break; |
4983 | if (!ih->code) |
4984 | pstats->mstats.igmp_v1queries[dir]++; |
4985 | else |
4986 | pstats->mstats.igmp_v2queries[dir]++; |
4987 | } |
4988 | break; |
4989 | case IGMP_HOST_LEAVE_MESSAGE: |
4990 | pstats->mstats.igmp_leaves[dir]++; |
4991 | break; |
4992 | } |
4993 | break; |
4994 | #if IS_ENABLED(CONFIG_IPV6) |
4995 | case htons(ETH_P_IPV6): |
4996 | t_len = ntohs(ipv6_hdr(skb)->payload_len) + |
4997 | sizeof(struct ipv6hdr); |
4998 | t_len -= skb_network_header_len(skb); |
4999 | switch (type) { |
5000 | case ICMPV6_MGM_REPORT: |
5001 | pstats->mstats.mld_v1reports[dir]++; |
5002 | break; |
5003 | case ICMPV6_MLD2_REPORT: |
5004 | pstats->mstats.mld_v2reports[dir]++; |
5005 | break; |
5006 | case ICMPV6_MGM_QUERY: |
5007 | if (t_len != sizeof(struct mld_msg)) |
5008 | pstats->mstats.mld_v2queries[dir]++; |
5009 | else |
5010 | pstats->mstats.mld_v1queries[dir]++; |
5011 | break; |
5012 | case ICMPV6_MGM_REDUCTION: |
5013 | pstats->mstats.mld_leaves[dir]++; |
5014 | break; |
5015 | } |
5016 | break; |
5017 | #endif /* CONFIG_IPV6 */ |
5018 | } |
5019 | u64_stats_update_end(syncp: &pstats->syncp); |
5020 | } |
5021 | |
5022 | void br_multicast_count(struct net_bridge *br, |
5023 | const struct net_bridge_port *p, |
5024 | const struct sk_buff *skb, u8 type, u8 dir) |
5025 | { |
5026 | struct bridge_mcast_stats __percpu *stats; |
5027 | |
5028 | /* if multicast_disabled is true then igmp type can't be set */ |
5029 | if (!type || !br_opt_get(br, opt: BROPT_MULTICAST_STATS_ENABLED)) |
5030 | return; |
5031 | |
5032 | if (p) |
5033 | stats = p->mcast_stats; |
5034 | else |
5035 | stats = br->mcast_stats; |
5036 | if (WARN_ON(!stats)) |
5037 | return; |
5038 | |
5039 | br_mcast_stats_add(stats, skb, type, dir); |
5040 | } |
5041 | |
/* Allocate the bridge-level per-cpu multicast statistics.
 * Returns 0 on success or -ENOMEM.
 */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}
5050 | |
5051 | void br_multicast_uninit_stats(struct net_bridge *br) |
5052 | { |
5053 | free_percpu(pdata: br->mcast_stats); |
5054 | } |
5055 | |
/* noinline for https://llvm.org/pr45802#c9 */
/* Accumulate one RX/TX counter pair from @src into @dst. */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
5062 | |
5063 | void br_multicast_get_stats(const struct net_bridge *br, |
5064 | const struct net_bridge_port *p, |
5065 | struct br_mcast_stats *dest) |
5066 | { |
5067 | struct bridge_mcast_stats __percpu *stats; |
5068 | struct br_mcast_stats tdst; |
5069 | int i; |
5070 | |
5071 | memset(dest, 0, sizeof(*dest)); |
5072 | if (p) |
5073 | stats = p->mcast_stats; |
5074 | else |
5075 | stats = br->mcast_stats; |
5076 | if (WARN_ON(!stats)) |
5077 | return; |
5078 | |
5079 | memset(&tdst, 0, sizeof(tdst)); |
5080 | for_each_possible_cpu(i) { |
5081 | struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); |
5082 | struct br_mcast_stats temp; |
5083 | unsigned int start; |
5084 | |
5085 | do { |
5086 | start = u64_stats_fetch_begin(syncp: &cpu_stats->syncp); |
5087 | memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); |
5088 | } while (u64_stats_fetch_retry(syncp: &cpu_stats->syncp, start)); |
5089 | |
5090 | mcast_stats_add_dir(dst: tdst.igmp_v1queries, src: temp.igmp_v1queries); |
5091 | mcast_stats_add_dir(dst: tdst.igmp_v2queries, src: temp.igmp_v2queries); |
5092 | mcast_stats_add_dir(dst: tdst.igmp_v3queries, src: temp.igmp_v3queries); |
5093 | mcast_stats_add_dir(dst: tdst.igmp_leaves, src: temp.igmp_leaves); |
5094 | mcast_stats_add_dir(dst: tdst.igmp_v1reports, src: temp.igmp_v1reports); |
5095 | mcast_stats_add_dir(dst: tdst.igmp_v2reports, src: temp.igmp_v2reports); |
5096 | mcast_stats_add_dir(dst: tdst.igmp_v3reports, src: temp.igmp_v3reports); |
5097 | tdst.igmp_parse_errors += temp.igmp_parse_errors; |
5098 | |
5099 | mcast_stats_add_dir(dst: tdst.mld_v1queries, src: temp.mld_v1queries); |
5100 | mcast_stats_add_dir(dst: tdst.mld_v2queries, src: temp.mld_v2queries); |
5101 | mcast_stats_add_dir(dst: tdst.mld_leaves, src: temp.mld_leaves); |
5102 | mcast_stats_add_dir(dst: tdst.mld_v1reports, src: temp.mld_v1reports); |
5103 | mcast_stats_add_dir(dst: tdst.mld_v2reports, src: temp.mld_v2reports); |
5104 | tdst.mld_parse_errors += temp.mld_parse_errors; |
5105 | } |
5106 | memcpy(dest, &tdst, sizeof(*dest)); |
5107 | } |
5108 | |
5109 | int br_mdb_hash_init(struct net_bridge *br) |
5110 | { |
5111 | int err; |
5112 | |
5113 | err = rhashtable_init(ht: &br->sg_port_tbl, params: &br_sg_port_rht_params); |
5114 | if (err) |
5115 | return err; |
5116 | |
5117 | err = rhashtable_init(ht: &br->mdb_hash_tbl, params: &br_mdb_rht_params); |
5118 | if (err) { |
5119 | rhashtable_destroy(ht: &br->sg_port_tbl); |
5120 | return err; |
5121 | } |
5122 | |
5123 | return 0; |
5124 | } |
5125 | |
5126 | void br_mdb_hash_fini(struct net_bridge *br) |
5127 | { |
5128 | rhashtable_destroy(ht: &br->sg_port_tbl); |
5129 | rhashtable_destroy(ht: &br->mdb_hash_tbl); |
5130 | } |
5131 |
Definitions
- br_mdb_rht_params
- br_sg_port_rht_params
- br_sg_port_find
- br_mdb_ip_get_rcu
- br_mdb_ip_get
- br_mdb_ip4_get
- br_mdb_ip6_get
- br_mdb_entry_skb_get
- br_multicast_pg_to_port_ctx
- br_multicast_port_vid_to_port_ctx
- br_multicast_ctx_should_use
- br_port_group_equal
- __fwd_add_star_excl
- __fwd_del_star_excl
- br_multicast_star_g_handle_mode
- br_multicast_sg_host_state
- br_multicast_star_g_host_state
- br_multicast_sg_del_exclude_ports
- br_multicast_sg_add_exclude_ports
- br_multicast_fwd_src_add
- br_multicast_fwd_src_remove
- br_multicast_fwd_src_handle
- br_multicast_destroy_mdb_entry
- br_multicast_del_mdb_entry
- br_multicast_group_expired
- br_multicast_destroy_group_src
- __br_multicast_del_group_src
- br_multicast_del_group_src
- br_multicast_port_ngroups_inc_one
- br_multicast_port_ngroups_dec_one
- br_multicast_port_ngroups_inc
- br_multicast_port_ngroups_dec
- br_multicast_ngroups_get
- br_multicast_ngroups_set_max
- br_multicast_ngroups_get_max
- br_multicast_destroy_port_group
- br_multicast_del_pg
- br_multicast_find_del_pg
- br_multicast_port_group_expired
- br_multicast_gc
- __br_multicast_query_handle_vlan
- br_ip4_multicast_alloc_query
- br_ip6_multicast_alloc_query
- br_multicast_alloc_query
- br_multicast_new_group
- br_multicast_group_src_expired
- br_multicast_find_group_src
- br_multicast_new_group_src
- br_multicast_new_port_group
- br_multicast_del_port_group
- br_multicast_host_join
- br_multicast_host_leave
- __br_multicast_add_group
- br_multicast_add_group
- br_ip4_multicast_add_group
- br_ip6_multicast_add_group
- br_multicast_rport_del
- br_ip4_multicast_rport_del
- br_ip6_multicast_rport_del
- br_multicast_router_expired
- br_ip4_multicast_router_expired
- br_ip6_multicast_router_expired
- br_mc_router_state_change
- br_multicast_local_router_expired
- br_ip4_multicast_local_router_expired
- br_ip6_multicast_local_router_expired
- br_multicast_querier_expired
- br_ip4_multicast_querier_expired
- br_ip6_multicast_querier_expired
- br_multicast_query_delay_expired
- br_multicast_select_own_querier
- __br_multicast_send_query
- br_multicast_read_querier
- br_multicast_update_querier
- br_multicast_send_query
- br_multicast_port_query_expired
- br_ip4_multicast_port_query_expired
- br_ip6_multicast_port_query_expired
- br_multicast_port_group_rexmit
- br_mc_disabled_update
- br_multicast_port_ctx_init
- br_multicast_port_ctx_deinit
- br_multicast_add_port
- br_multicast_del_port
- br_multicast_enable
- __br_multicast_enable_port_ctx
- br_multicast_enable_port
- __br_multicast_disable_port_ctx
- br_multicast_disable_port
- __grp_src_delete_marked
- __grp_src_mod_timer
- __grp_src_query_marked_and_rexmit
- __grp_send_query_and_rexmit
- br_multicast_isinc_allow
- __grp_src_isexc_incl
- __grp_src_isexc_excl
- br_multicast_isexc
- __grp_src_toin_incl
- __grp_src_toin_excl
- br_multicast_toin
- __grp_src_toex_incl
- __grp_src_toex_excl
- br_multicast_toex
- __grp_src_block_incl
- __grp_src_block_excl
- br_multicast_block
- br_multicast_find_port
- br_ip4_multicast_igmp3_report
- br_ip6_multicast_mld2_report
- br_multicast_select_querier
- __br_multicast_get_querier_port
- br_multicast_querier_state_size
- br_multicast_dump_querier_state
- br_multicast_update_query_timer
- br_port_mc_router_state_change
- br_multicast_rport_from_node
- br_multicast_get_rport_slot
- br_multicast_no_router_otherpf
- br_multicast_add_router
- br_ip4_multicast_add_router
- br_ip6_multicast_add_router
- br_multicast_mark_router
- br_ip4_multicast_mark_router
- br_ip6_multicast_mark_router
- br_ip4_multicast_query_received
- br_ip6_multicast_query_received
- br_ip4_multicast_query
- br_ip6_multicast_query
- br_multicast_leave_group
- br_ip4_multicast_leave_group
- br_ip6_multicast_leave_group
- br_multicast_err_count
- br_multicast_pim
- br_ip4_multicast_mrd_rcv
- br_multicast_ipv4_rcv
- br_ip6_multicast_mrd_rcv
- br_multicast_ipv6_rcv
- br_multicast_rcv
- br_multicast_query_expired
- br_ip4_multicast_query_expired
- br_ip6_multicast_query_expired
- br_multicast_gc_work
- br_multicast_ctx_init
- br_multicast_ctx_deinit
- br_multicast_init
- br_ip4_multicast_join_snoopers
- br_ip6_multicast_join_snoopers
- br_multicast_join_snoopers
- br_ip4_multicast_leave_snoopers
- br_ip6_multicast_leave_snoopers
- br_multicast_leave_snoopers
- __br_multicast_open_query
- __br_multicast_open
- br_multicast_open
- __br_multicast_stop
- br_multicast_toggle_one_vlan
- br_multicast_toggle_vlan
- br_multicast_toggle_vlan_snooping
- br_multicast_toggle_global_vlan
- br_multicast_stop
- br_multicast_dev_del
- br_multicast_set_router
- br_multicast_rport_del_notify
- br_multicast_set_port_router
- br_multicast_set_vlan_router
- br_multicast_start_querier
- br_multicast_toggle
- br_multicast_enabled
- br_multicast_router
- br_multicast_set_querier
- br_multicast_set_igmp_version
- br_multicast_set_mld_version
- br_multicast_set_query_intvl
- br_multicast_set_startup_query_intvl
- br_multicast_list_adjacent
- br_multicast_has_querier_anywhere
- br_multicast_has_querier_adjacent
- br_multicast_has_router_adjacent
- br_mcast_stats_add
- br_multicast_count
- br_multicast_init_stats
- br_multicast_uninit_stats
- mcast_stats_add_dir
- br_multicast_get_stats
- br_mdb_hash_init
Improve your Profiling and Debugging skills
Find out more