// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way of traversing the tree is through
 * its ports list, which does not uniquely list the member switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}
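
/* As an illustrative sketch (the real call sites live in net/dsa/port.c), a
 * cross-chip event such as a port joining a bridge is propagated to every
 * switch roughly like this:
 *
 *	struct dsa_notifier_bridge_info info = {
 *		.tree_index = dp->ds->dst->index,
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.br = br,
 *	};
 *
 *	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
 */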

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
	unsigned int id;

	if (dsa_lag_id(dst, lag) >= 0)
		/* Already mapped */
		return;

	for (id = 0; id < dst->lags_len; id++) {
		if (!dsa_lag_dev(dst, id)) {
			dst->lags[id] = lag;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
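
/* Drivers opt in to LAG offload by advertising an ID space, typically from
 * their .setup callback. A minimal sketch (the "foo" driver is hypothetical):
 *
 *	static int foo_setup(struct dsa_switch *ds)
 *	{
 *		ds->num_lag_ids = 16;	// hardware supports 16 trunk groups
 *
 *		return 0;
 *	}
 */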

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
	struct dsa_port *dp;
	unsigned int id;

	dsa_lag_foreach_port(dp, dst, lag)
		/* There are remaining users of this mapping */
		return;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag) {
			dst->lags[id] = NULL;
			break;
		}
	}
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	/* When the offload is being prepared for a port, that port has a
	 * valid dp->bridge_dev pointer, but not yet a valid dp->bridge_num.
	 * However, there might be other ports with the same dp->bridge_dev
	 * and a valid dp->bridge_num, so just ignore this port.
	 */
	list_for_each_entry(dst, &dsa_tree_list, list)
		list_for_each_entry(dp, &dst->ports, list)
			if (dp->bridge_dev == bridge_dev &&
			    dp->bridge_num != -1)
				return dp->bridge_num;

	return -1;
}

int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	int bridge_num = dsa_bridge_num_find(bridge_dev);

	if (bridge_num < 0) {
		/* First port that offloads TX forwarding for this bridge */
		bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
						 DSA_MAX_NUM_OFFLOADING_BRIDGES);
		if (bridge_num >= max)
			return -1;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
{
	/* Check if the bridge is still in use, otherwise it is time
	 * to clean it up so we can reuse this bridge_num later.
	 */
	if (dsa_bridge_num_find(bridge_dev) < 0)
		clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
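
/* A sketch of the typical get/put pairing: the caller that enables TX
 * forwarding offload towards a bridge claims a number, and releases it once
 * the last port leaves that bridge (error handling elided; the exact caller
 * and the ds->num_fwd_offloading_bridges limit are assumptions here):
 *
 *	bridge_num = dsa_bridge_num_get(bridge_dev,
 *					ds->num_fwd_offloading_bridges);
 *	if (bridge_num < 0)
 *		return bridge_num;
 *	...
 *	dsa_bridge_num_put(bridge_dev, bridge_num);
 */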

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
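
/* The "link" phandles walked above come from the device tree description of
 * a DSA (inter-switch) port, along the lines of this sketch:
 *
 *	port@5 {
 *		reg = <5>;
 *		link = <&switch1port6 &switch2port9>;
 *		phy-mode = "rgmii-txid";
 *	};
 */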

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			/* Prefer a local CPU port */
			if (dp->ds != cpu_dp->ds)
				continue;

			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has already been
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int port, err;

	if (tag_ops->proto == dst->default_proto)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether or
	 * not to probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink);
	if (err)
		goto free_devlink;

	/* Set up devlink port instances now, so that the switch
	 * setup() can register regions, etc., against the ports.
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	devlink_params_publish(ds->devlink);

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;

	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		list_for_each_entry(dp, &ds->dst->ports, list)
			if (dp->ds == ds)
				dsa_port_devlink_teardown(dp);
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			err = dsa_master_setup(dp->master, dp);
			if (err)
				return err;
		}
	}

	return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			dsa_master_teardown(dp->master);
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_is_user_port(dp->ds, dp->index))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;
	dp->bridge_num = -1;

	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another; when that
	 * happens, the switch driver may want to know whether its tagging
	 * protocol is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
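
/* Sketch of the properties consumed above: an "ethernet" phandle makes the
 * port a CPU port (optionally with a "dsa-tag-protocol" override), a "link"
 * property makes it a DSA port, and anything else is a user port, optionally
 * named via "label":
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *	};
 *
 *	port@6 {
 *		reg = <6>;
 *		ethernet = <&fec1>;
 *	};
 */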

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
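
/* The "dsa,member" property read above encodes <tree_index switch_index>.
 * For example, the second switch of tree 0 would carry (sketch):
 *
 *	dsa,member = <0 1>;
 */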

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches or multiple trees via
	 * platform data, so this is the only switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp, *next;

	list_for_each_entry_safe(dp, next, &dst->ports, list) {
		if (dp->ds != ds)
			continue;
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
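
/* A minimal sketch of how a switch driver registers itself; the "foo" names
 * are hypothetical and error handling is reduced to the essentials:
 *
 *	static const struct dsa_switch_ops foo_ops = {
 *		.get_tag_protocol	= foo_get_tag_protocol,
 *		.setup			= foo_setup,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &pdev->dev;
 *		ds->num_ports = 8;
 *		ds->ops = &foo_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 */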

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	LIST_HEAD(unregister_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds != ds)
			continue;

		if (!dsa_port_is_user(dp))
			continue;

		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
		/* Just unlinking ourselves as uppers of the master is not
		 * sufficient. When the master net device unregisters, that will
		 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
		 * and trigger a dev_close on our own devices (dsa_slave_close).
		 * In turn, that will call dev_mc_unsync on the master's net
		 * device. If the master is also a DSA switch port, this will
		 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
		 * its own master. Lockdep will complain about the fact that
		 * all cascaded masters have the same dsa_master_addr_list_lock_key,
		 * which it normally would not do if the cascaded masters would
		 * be in a proper upper/lower relationship, which we've just
		 * destroyed.
		 * To suppress the lockdep warnings, let's actually unregister
		 * the DSA slave interfaces too, to avoid the nonsensical
		 * multicast address list synchronization on shutdown.
		 */
		unregister_netdevice_queue(slave_dev, &unregister_list);
	}
	unregister_netdevice_many(&unregister_list);

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);