// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

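/* Illustrative sketch, not part of the original file: a tagging protocol
 * driver built as a separate module would normally not call
 * dsa_tag_drivers_register() by hand, but rely on the helper macros from
 * <net/dsa.h>. The "example" identifiers below are hypothetical;
 * module_dsa_tag_driver() and MODULE_ALIAS_DSA_TAG_DRIVER() are the real
 * macros, and the alias is what lets the request_module() call in
 * dsa_tag_driver_get() below autoload the tagger:
 *
 *	static const struct dsa_device_ops example_netdev_ops = {
 *		.name	= "example",
 *		.proto	= DSA_TAG_PROTO_NONE,
 *		.xmit	= example_xmit,
 *		.rcv	= example_rcv,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE);
 *	module_dsa_tag_driver(example_netdev_ops);
 */
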
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	/* Try to autoload the tagger module first; tag drivers advertise
	 * a module alias of DSA_TAG_DRIVER_ALIAS followed by the protocol
	 * number (see MODULE_ALIAS_DSA_TAG_DRIVER()).
	 */
	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

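/* Illustrative sketch, not part of the original file: callers pair
 * dsa_tag_driver_get() with dsa_tag_driver_put() so the tagger module
 * stays pinned while its ops are in use:
 *
 *	const struct dsa_device_ops *tag_ops;
 *
 *	tag_ops = dsa_tag_driver_get(DSA_TAG_PROTO_NONE);
 *	if (IS_ERR(tag_ops))
 *		return PTR_ERR(tag_ops);
 *
 *	(use tag_ops->xmit / tag_ops->rcv ...)
 *
 *	dsa_tag_driver_put(tag_ops);
 */
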
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

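/* Find the "net" class device corresponding to @dev (the device itself
 * or one of its children) and return the associated net_device, or NULL
 * if there is none. A reference is taken with dev_hold(); the caller is
 * responsible for the matching dev_put().
 */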
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	/* ptp_classify_raw() expects the MAC header; temporarily expose it */
	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	/* Taggers modify the frame in place, so make sure we own the data */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	/* The tagger demuxes the frame to the right slave netdev and
	 * strips the switch tag; a NULL return means it consumed or
	 * dropped the frame.
	 */
	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Untag frames tagged with the bridge's PVID in software, for
	 * switches that cannot untag towards the CPU port in hardware.
	 */
	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	const struct dsa_port *dp = dsa_to_port(ds, p);

	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

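/* All traffic received on a DSA master is steered here: eth_type_trans()
 * resolves frames on a master (see netdev_uses_dsa()) to ETH_P_XDSA,
 * which this packet_type matches and hands to dsa_switch_rcv().
 */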
static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

/* Deferred DSA work runs on an ordered workqueue, so items execute
 * strictly in submission order.
 */
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);

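/* Illustrative sketch, not part of the original file: consumers hook the
 * chain with the standard notifier pattern (the "example" names are
 * hypothetical):
 *
 *	static int example_dsa_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct dsa_notifier_info *info = ptr;
 *
 *		(react to the event on info->dev)
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_dsa_event,
 *	};
 *
 *	ret = register_dsa_notifier(&example_nb);
 */
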
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

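/* Illustrative sketch, not part of the original file, assuming the
 * DSA_DEVLINK_PARAM_DRIVER() helper from <net/dsa.h> (which wires devlink
 * to the two wrappers above): a switch driver implementing
 * ds->ops->devlink_param_get/set would describe and register its
 * parameters roughly like this; the "example" names are hypothetical:
 *
 *	static const struct devlink_param example_params[] = {
 *		DSA_DEVLINK_PARAM_DRIVER(EXAMPLE_PARAM_ID, "example",
 *					 DEVLINK_PARAM_TYPE_U8,
 *					 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
 *	};
 *
 *	err = dsa_devlink_params_register(ds, example_params,
 *					  ARRAY_SIZE(example_params));
 */
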
int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink, NULL);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");