// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DSA topology and switch handling
 *
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>

#include "conduit.h"
#include "devlink.h"
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "user.h"

#define DSA_MAX_NUM_OFFLOADING_BRIDGES	BITS_PER_LONG

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

static struct workqueue_struct *dsa_owq;

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

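/* All deferred work in DSA goes through a single ordered workqueue, so
 * that work items (e.g. those scheduled by the switchdev handlers for
 * user ports) execute in the order they were queued, fabric-wide.
 */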
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

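/* Walk the tree's ports and return the LAG structure whose underlying
 * net_device is @lag_dev, or NULL if no port is a member of it.
 */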
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

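/* Return the number already assigned to @bridge_dev, or reserve a new
 * 1-based number from the global bitmap. Zero denotes "no number" and is
 * returned both when the driver supports no isolation (@max is 0) and
 * when all @max numbers are already in use.
 */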
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

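/* Look up a switch by its (tree index, switch index) pair, i.e. the two
 * cells of the "dsa,member" device tree property. Returns NULL if no
 * such switch has been registered yet.
 */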
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

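/* Get-or-create semantics: return the existing tree with this index
 * (taking a reference), or allocate a new one with refcount 1.
 */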
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

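/* Get-or-create a routing table entry describing that @dp can reach
 * @link_dp over a DSA link.
 */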
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

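/* Resolve the "link" phandles of a DSA port into routing table entries.
 * Returning false means a phandle points to a port which has not been
 * probed yet; the tree is then incomplete, and its setup is retried when
 * the remaining switches register.
 */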
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *conduit;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	conduit = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return conduit;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

static struct dsa_port *
dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp;

	if (!ds->ops->preferred_default_local_cpu_port)
		return NULL;

	cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
	if (!cpu_dp)
		return NULL;

	if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
		return NULL;

	return cpu_dp;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

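/* Set up a single port: devlink registration first, then type-specific
 * initialization (link registration and enabling for shared CPU/DSA
 * ports, user netdev creation for user ports). On error, any partial
 * setup is unwound in reverse order.
 */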
static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_user_create(dp);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->user) {
			dsa_user_destroy(dp->user);
			dp->user = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

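/* Bring up one switch of the tree: allocate its devlink instance,
 * register the cross-chip notifier, run the driver's ->setup(), sync the
 * tagging protocol, optionally create a user MDIO bus for drivers which
 * implement ->phy_read(), and finally publish the devlink instance.
 */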
static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the user MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the user MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->user_mii_bus && ds->ops->phy_read) {
		ds->user_mii_bus = mdiobus_alloc();
		if (!ds->user_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_user_mii_bus_init(ds);

		err = mdiobus_register(ds->user_mii_bus);
		if (err < 0)
			goto free_user_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_user_mii_bus:
	if (ds->user_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->user_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	if (ds->user_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->user_mii_bus);
		mdiobus_free(ds->user_mii_bus);
		ds->user_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;
		bool admin_up = (conduit->flags & IFF_UP) &&
				!qdisc_tx_is_noop(conduit);

		err = dsa_conduit_setup(conduit, cpu_dp);
		if (err)
			break;

		/* Replay conduit state event */
		dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
		dsa_tree_conduit_oper_state_change(dst, conduit,
						   netif_oper_up(conduit));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the conduit is
		 * currently up and running.
		 */
		dsa_tree_conduit_admin_state_change(dst, conduit, false);

		dsa_conduit_teardown(conduit);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

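/* Bring up the whole tree, in dependency order: CPU port assignment,
 * switches, ports, conduit interfaces, LAG bookkeeping. Returning 0
 * while the routing table is still incomplete is deliberate: setup is
 * retried when the remaining switches of the tree are probed.
 */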
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_conduit(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_conduit;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_conduit:
	dsa_tree_teardown_conduit(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_conduit(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->user->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
					  struct net_device *conduit)
{
	struct dsa_notifier_conduit_state_info info;
	struct dsa_port *cpu_dp = conduit->dsa_ptr;

	info.conduit = conduit;
	info.operational = dsa_port_conduit_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
}

void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
					 struct net_device *conduit,
					 bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (up && cpu_dp->conduit_oper_up))
		notify = true;

	cpu_dp->conduit_admin_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}

void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
					struct net_device *conduit,
					bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (cpu_dp->conduit_admin_up && up))
		notify = true;

	cpu_dp->conduit_oper_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}

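/* Get-or-create the dsa_port structure for port @index of @ds, linking
 * it into the tree's list of ports.
 */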
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *conduit)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_user_dev_check(conduit)) {
		mdp = dsa_user_to_port(conduit);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the conduit device is not itself a DSA user in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, conduit);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->conduit = conduit;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *conduit;
		const char *user_protocol;

		conduit = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!conduit)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, conduit, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

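/* Parse the optional "dsa,member" property, which places the switch in a
 * tree. The two cells are the tree index and the switch index, e.g. (an
 * illustrative device tree snippet):
 *
 *	switch@0 {
 *		dsa,member = <0 0>;	(switch 0 of tree 0)
 *		...
 *	};
 *
 * When the property is absent, the switch defaults to index 0 of tree 0.
 */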
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

static struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *conduit;

		conduit = dsa_dev_to_net_device(dev);
		if (!conduit)
			return -EPROBE_DEFER;

		dev_put(conduit);

		return dsa_port_parse_cpu(dp, conduit, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

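/* Entry point for switch drivers, typically called from their probe
 * routine once ds->dev, ds->num_ports and ds->ops are filled in. A
 * minimal caller could look like this (sketch; the "foo" names are
 * illustrative, not a real driver):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &pdev->dev;
 *		ds->num_ports = 5;
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 */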
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
 * the DSA conduit, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *conduit, *user_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		conduit = dsa_port_to_conduit(dp);
		user_dev = dp->user;

		netdev_upper_dev_unlink(conduit, user_dev);
	}

	/* Disconnect from further netdevice notifiers on the conduit,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->conduit->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->user;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_suspend(dp->user);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_resume(dp->user);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_user_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_user_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static const struct dsa_stubs __dsa_stubs = {
	.conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};

static void dsa_register_stubs(void)
{
	dsa_stubs = &__dsa_stubs;
}

static void dsa_unregister_stubs(void)
{
	dsa_stubs = NULL;
}

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_user_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	dsa_register_stubs();

	return 0;

netlink_register_fail:
	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_unregister_stubs();

	rtnl_link_unregister(&dsa_link_ops);

	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");