// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tree_list);
static DEFINE_MUTEX(dsa2_mutex);

static const struct devlink_ops dsa_devlink_ops = {
};

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

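/* Return the tree with @index, taking a reference if it already exists,
 * or allocate a new one otherwise.
 */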
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static bool dsa_port_is_dsa(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_DSA;
}

static bool dsa_port_is_cpu(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_CPU;
}

static bool dsa_port_is_user(struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER;
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int device, port;

	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			if (dp->dn == dn)
				return dp;
		}
	}

	return NULL;
}

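/* Walk the "link" phandles of a DSA port's device tree node and record in
 * the routing table which local port reaches each linked switch. Returns
 * false if a linked port has not been discovered yet, i.e. the tree is not
 * complete.
 */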
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		ds->rtable[link_dp->ds->index] = dp->index;
	}

	return true;
}

static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
{
	bool complete = true;
	struct dsa_port *dp;
	int i;

	for (i = 0; i < DSA_MAX_SWITCHES; i++)
		ds->rtable[i] = DSA_RTABLE_NONE;

	for (i = 0; i < ds->num_ports; i++) {
		dp = &ds->ports[i];

		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	bool complete = true;
	int device;

	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		complete = dsa_switch_setup_routing_table(ds);
		if (!complete)
			break;
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int device, port;

	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			if (dsa_port_is_cpu(dp))
				return dp;
		}
	}

	return NULL;
}

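/* Use the first CPU port found in the tree as the default CPU port of
 * every user and DSA port in the fabric.
 */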
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int device, port;

	/* DSA currently only supports a single CPU port */
	dst->cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!dst->cpu_dp) {
		pr_warn("Tree has no master device\n");
		return -EINVAL;
	}

	/* Assign the default CPU port to all ports of the fabric */
	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = dst->cpu_dp;
		}
	}

	return 0;
}

static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
{
	/* DSA currently only supports a single CPU port */
	dst->cpu_dp = NULL;
}

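/* Register a devlink port for @dp and perform the type-specific setup:
 * link registration and enabling for CPU and DSA ports, slave net_device
 * creation for user ports. Partial progress is unwound on error.
 */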
static int dsa_port_setup(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const unsigned char *id = (const unsigned char *)&dst->index;
	const unsigned char len = sizeof(dst->index);
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	bool devlink_port_registered = false;
	struct devlink *dl = ds->devlink;
	bool dsa_port_enabled = false;
	int err = 0;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		memset(dlp, 0, sizeof(*dlp));
		devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_CPU,
				       dp->index, false, 0, id, len);
		err = devlink_port_register(dl, dlp, dp->index);
		if (err)
			break;
		devlink_port_registered = true;

		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		memset(dlp, 0, sizeof(*dlp));
		devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_DSA,
				       dp->index, false, 0, id, len);
		err = devlink_port_register(dl, dlp, dp->index);
		if (err)
			break;
		devlink_port_registered = true;

		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		memset(dlp, 0, sizeof(*dlp));
		devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       dp->index, false, 0, id, len);
		err = devlink_port_register(dl, dlp, dp->index);
		if (err)
			break;
		devlink_port_registered = true;

		dp->mac = of_get_mac_address(dp->dn);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err && devlink_port_registered)
		devlink_port_unregister(dlp);

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_tag_driver_put(dp->tag_ops);
		devlink_port_unregister(dlp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		devlink_port_unregister(dlp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		devlink_port_unregister(dlp);
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err = 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink = devlink_alloc(&dsa_devlink_ops, 0);
	if (!ds->devlink)
		return -ENOMEM;

	err = devlink_register(ds->devlink, ds->dev);
	if (err)
		goto free_devlink;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto teardown;
	}

	return 0;

teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink:
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_unregister(ds->slave_mii_bus);

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}
}

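/* Set up every switch of the tree, then each of its ports. Port setup
 * errors are not fatal here, but a switch setup error unwinds the switches
 * already brought up.
 */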
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int device, port, i;
	int err = 0;

	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		err = dsa_switch_setup(ds);
		if (err)
			goto switch_teardown;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			err = dsa_port_setup(dp);
			if (err)
				continue;
		}
	}

	return 0;

switch_teardown:
	for (i = 0; i < device; i++) {
		ds = dst->ds[i];
		if (!ds)
			continue;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			dsa_port_teardown(dp);
		}

		dsa_switch_teardown(ds);
	}

	return err;
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int device, port;

	for (device = 0; device < DSA_MAX_SWITCHES; device++) {
		ds = dst->ds[device];
		if (!ds)
			continue;

		for (port = 0; port < ds->num_ports; port++) {
			dp = &ds->ports[port];

			dsa_port_teardown(dp);
		}

		dsa_switch_teardown(ds);
	}
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp = dst->cpu_dp;
	struct net_device *master = cpu_dp->master;

	/* DSA currently supports a single pair of CPU port and master device */
	return dsa_master_setup(master, cpu_dp);
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp = dst->cpu_dp;
	struct net_device *master = cpu_dp->master;

	return dsa_master_teardown(master);
}

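/* A tree is only set up once its routing table is complete, i.e. all of
 * its member switches have been discovered; until then this returns 0
 * without doing anything.
 */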
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_default_cpu(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_default_cpu;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_default_cpu:
	dsa_tree_teardown_default_cpu(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	if (!dst->setup)
		return;

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_default_cpu(dst);

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static void dsa_tree_remove_switch(struct dsa_switch_tree *dst,
				   unsigned int index)
{
	dsa_tree_teardown(dst);

	dst->ds[index] = NULL;
	dsa_tree_put(dst);
}

static int dsa_tree_add_switch(struct dsa_switch_tree *dst,
			       struct dsa_switch *ds)
{
	unsigned int index = ds->index;
	int err;

	if (dst->ds[index])
		return -EBUSY;

	dsa_tree_get(dst);
	dst->ds[index] = ds;

	err = dsa_tree_setup(dst);
	if (err) {
		dst->ds[index] = NULL;
		dsa_tree_put(dst);
	}

	return err;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

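/* Resolve the tagging protocol advertised by the switch driver for this
 * CPU port and bind the matching tagger, deferring probe if no tagger for
 * that protocol is available yet.
 */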
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol tag_protocol;

	tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
	tag_ops = dsa_tag_driver_get(tag_protocol);
	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;
		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	dp->type = DSA_PORT_TYPE_CPU;
	dp->filter = tag_ops->filter;
	dp->rcv = tag_ops->rcv;
	dp->tag_ops = tag_ops;
	dp->master = master;
	dp->dst = dst;

	return 0;
}

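/* A port with an "ethernet" phandle is a CPU port, a port with a "link"
 * property is a DSA (switch-to-switch) port, and anything else is a user
 * port named after its optional "label" property.
 */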
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		return dsa_port_parse_cpu(dp, master);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		dev_err(ds->dev, "no ports child node found\n");
		return -EINVAL;
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err)
			goto out_put_node;

		if (reg >= ds->num_ports) {
			err = -EINVAL;
			goto out_put_node;
		}

		dp = &ds->ports[reg];

		err = dsa_port_parse_of(dp, port);
		if (err)
			goto out_put_node;
	}

out_put_node:
	of_node_put(ports);
	return err;
}

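/* The optional "dsa,member" property is a <tree index, switch index> pair
 * placing the switch within a fabric. It defaults to <0 0> when absent.
 */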
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];
	if (ds->index >= DSA_MAX_SWITCHES)
		return -EINVAL;

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = &ds->ports[i];

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	return dsa_switch_parse_ports(ds, cd);
}

static int dsa_switch_add(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	return dsa_tree_add_switch(dst, ds);
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_chip_data *pdata = ds->dev->platform_data;
	struct device_node *np = ds->dev->of_node;
	int err;

	if (np)
		err = dsa_switch_parse_of(ds, np);
	else if (pdata)
		err = dsa_switch_parse(ds, pdata);
	else
		err = -ENODEV;

	if (err)
		return err;

	return dsa_switch_add(ds);
}

struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
{
	struct dsa_switch *ds;
	int i;

	ds = devm_kzalloc(dev, struct_size(ds, ports, n), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = dev;
	ds->num_ports = n;

	for (i = 0; i < ds->num_ports; ++i) {
		ds->ports[i].index = i;
		ds->ports[i].ds = ds;
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dsa_switch_alloc);

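/* Register a switch with the DSA core. The tree reference taken while
 * parsing is dropped here; the tree stays alive through the reference
 * taken in dsa_tree_add_switch() for each of its members.
 */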
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	unsigned int index = ds->index;

	dsa_tree_remove_switch(dst, index);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);