1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2019-2021 NXP
3  *
4  * This is an umbrella module for all network switches that are
5  * register-compatible with Ocelot and that perform I/O to their host CPU
6  * through an NPI (Node Processor Interface) Ethernet port.
7  */
8 #include <uapi/linux/if_bridge.h>
9 #include <soc/mscc/ocelot_vcap.h>
10 #include <soc/mscc/ocelot_qsys.h>
11 #include <soc/mscc/ocelot_sys.h>
12 #include <soc/mscc/ocelot_dev.h>
13 #include <soc/mscc/ocelot_ana.h>
14 #include <soc/mscc/ocelot_ptp.h>
15 #include <soc/mscc/ocelot.h>
16 #include <linux/dsa/8021q.h>
17 #include <linux/dsa/ocelot.h>
18 #include <linux/platform_device.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/module.h>
21 #include <linux/of_net.h>
22 #include <linux/pci.h>
23 #include <linux/of.h>
24 #include <linux/pcs-lynx.h>
25 #include <net/pkt_sched.h>
26 #include <net/dsa.h>
27 #include "felix.h"
28 
29 static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
30 				      bool pvid, bool untagged)
31 {
32 	struct ocelot_vcap_filter *outer_tagging_rule;
33 	struct ocelot *ocelot = &felix->ocelot;
34 	struct dsa_switch *ds = felix->ds;
35 	int key_length, upstream, err;
36 
37 	/* We don't need to install the rxvlan into the other ports' filtering
38 	 * tables, because we're just pushing the rxvlan when sending towards
39 	 * the CPU
40 	 */
41 	if (!pvid)
42 		return 0;
43 
44 	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
45 	upstream = dsa_upstream_port(ds, port);
46 
47 	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
48 				     GFP_KERNEL);
49 	if (!outer_tagging_rule)
50 		return -ENOMEM;
51 
52 	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
53 	outer_tagging_rule->prio = 1;
54 	outer_tagging_rule->id.cookie = port;
55 	outer_tagging_rule->id.tc_offload = false;
56 	outer_tagging_rule->block_id = VCAP_ES0;
57 	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
58 	outer_tagging_rule->lookup = 0;
59 	outer_tagging_rule->ingress_port.value = port;
60 	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
61 	outer_tagging_rule->egress_port.value = upstream;
62 	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
63 	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
64 	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
65 	outer_tagging_rule->action.tag_a_vid_sel = 1;
66 	outer_tagging_rule->action.vid_a_val = vid;
67 
68 	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
69 	if (err)
70 		kfree(outer_tagging_rule);
71 
72 	return err;
73 }
74 
75 static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
76 				      bool pvid, bool untagged)
77 {
78 	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
79 	struct ocelot *ocelot = &felix->ocelot;
80 	struct dsa_switch *ds = felix->ds;
81 	int upstream, err;
82 
83 	/* tag_8021q.c assumes we are implementing this via port VLAN
84 	 * membership, which we aren't. So we don't need to add any VCAP filter
85 	 * for the CPU port.
86 	 */
87 	if (ocelot->ports[port]->is_dsa_8021q_cpu)
88 		return 0;
89 
90 	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
91 	if (!untagging_rule)
92 		return -ENOMEM;
93 
94 	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
95 	if (!redirect_rule) {
96 		kfree(untagging_rule);
97 		return -ENOMEM;
98 	}
99 
100 	upstream = dsa_upstream_port(ds, port);
101 
102 	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
103 	untagging_rule->ingress_port_mask = BIT(upstream);
104 	untagging_rule->vlan.vid.value = vid;
105 	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
106 	untagging_rule->prio = 1;
107 	untagging_rule->id.cookie = port;
108 	untagging_rule->id.tc_offload = false;
109 	untagging_rule->block_id = VCAP_IS1;
110 	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
111 	untagging_rule->lookup = 0;
112 	untagging_rule->action.vlan_pop_cnt_ena = true;
113 	untagging_rule->action.vlan_pop_cnt = 1;
114 	untagging_rule->action.pag_override_mask = 0xff;
115 	untagging_rule->action.pag_val = port;
116 
117 	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
118 	if (err) {
119 		kfree(untagging_rule);
120 		kfree(redirect_rule);
121 		return err;
122 	}
123 
124 	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
125 	redirect_rule->ingress_port_mask = BIT(upstream);
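	/* Match the PAG that the IS1 untagging rule above assigns to frames
	 * received with this tx VLAN, so that only those frames are
	 * redirected back out the intended user port.
	 */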
126 	redirect_rule->pag = port;
127 	redirect_rule->prio = 1;
128 	redirect_rule->id.cookie = port;
129 	redirect_rule->id.tc_offload = false;
130 	redirect_rule->block_id = VCAP_IS2;
131 	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
132 	redirect_rule->lookup = 0;
133 	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
134 	redirect_rule->action.port_mask = BIT(port);
135 
136 	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
137 	if (err) {
138 		ocelot_vcap_filter_del(ocelot, untagging_rule);
139 		kfree(redirect_rule);
140 		return err;
141 	}
142 
143 	return 0;
144 }
145 
146 static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
147 				    u16 flags)
148 {
149 	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
150 	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
151 	struct ocelot *ocelot = ds->priv;
152 
153 	if (vid_is_dsa_8021q_rxvlan(vid))
154 		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
155 						  port, vid, pvid, untagged);
156 
157 	if (vid_is_dsa_8021q_txvlan(vid))
158 		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
159 						  port, vid, pvid, untagged);
160 
161 	return 0;
162 }
163 
164 static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
165 {
166 	struct ocelot_vcap_filter *outer_tagging_rule;
167 	struct ocelot_vcap_block *block_vcap_es0;
168 	struct ocelot *ocelot = &felix->ocelot;
169 
170 	block_vcap_es0 = &ocelot->block[VCAP_ES0];
171 
172 	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
173 								 port, false);
174 	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
175 	 * installing outer tagging ES0 rules where they weren't needed.
176 	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
177 	 * so that forces us to be slightly sloppy here, and just assume that
178 	 * if we didn't find an outer_tagging_rule it means that there was
179 	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
180 	 * port. This is most probably true though.
181 	 */
182 	if (!outer_tagging_rule)
183 		return 0;
184 
185 	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
186 }
187 
188 static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
189 {
190 	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
191 	struct ocelot_vcap_block *block_vcap_is1;
192 	struct ocelot_vcap_block *block_vcap_is2;
193 	struct ocelot *ocelot = &felix->ocelot;
194 	int err;
195 
196 	if (ocelot->ports[port]->is_dsa_8021q_cpu)
197 		return 0;
198 
199 	block_vcap_is1 = &ocelot->block[VCAP_IS1];
200 	block_vcap_is2 = &ocelot->block[VCAP_IS2];
201 
202 	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
203 							     port, false);
204 	if (!untagging_rule)
205 		return 0;
206 
207 	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
208 	if (err)
209 		return err;
210 
211 	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
212 							    port, false);
213 	if (!redirect_rule)
214 		return 0;
215 
216 	return ocelot_vcap_filter_del(ocelot, redirect_rule);
217 }
218 
219 static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
220 {
221 	struct ocelot *ocelot = ds->priv;
222 
223 	if (vid_is_dsa_8021q_rxvlan(vid))
224 		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
225 						  port, vid);
226 
227 	if (vid_is_dsa_8021q_txvlan(vid))
228 		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
229 						  port, vid);
230 
231 	return 0;
232 }
233 
234 /* As an alternative to using the NPI functionality, that same hardware MAC
235  * connected internally to the enetc or fman DSA master can be configured to
236  * use the software-defined tag_8021q frame format. As far as the hardware is
237  * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
238  * module are now disconnected from it, but can still be accessed through
239  * register-based MMIO.
240  */
241 static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
242 {
243 	ocelot->ports[port]->is_dsa_8021q_cpu = true;
244 	ocelot->npi = -1;
245 
246 	/* Overwrite PGID_CPU with the non-tagging port */
247 	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
248 
249 	ocelot_apply_bridge_fwd_mask(ocelot);
250 }
251 
252 static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
253 {
254 	ocelot->ports[port]->is_dsa_8021q_cpu = false;
255 
256 	/* Restore PGID_CPU */
257 	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
258 			 PGID_CPU);
259 
260 	ocelot_apply_bridge_fwd_mask(ocelot);
261 }
262 
263 /* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
264  * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
265  * tag_8021q CPU port.
266  */
267 static int felix_setup_mmio_filtering(struct felix *felix)
268 {
269 	unsigned long user_ports = dsa_user_ports(felix->ds);
270 	struct ocelot_vcap_filter *redirect_rule;
271 	struct ocelot_vcap_filter *tagging_rule;
272 	struct ocelot *ocelot = &felix->ocelot;
273 	struct dsa_switch *ds = felix->ds;
274 	int cpu = -1, port, ret;
275 
276 	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
277 	if (!tagging_rule)
278 		return -ENOMEM;
279 
280 	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
281 	if (!redirect_rule) {
282 		kfree(tagging_rule);
283 		return -ENOMEM;
284 	}
285 
286 	for (port = 0; port < ocelot->num_phys_ports; port++) {
287 		if (dsa_is_cpu_port(ds, port)) {
288 			cpu = port;
289 			break;
290 		}
291 	}
292 
293 	if (cpu < 0) {
294 		kfree(tagging_rule);
295 		kfree(redirect_rule);
296 		return -EINVAL;
297 	}
298 
299 	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
300 	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
301 	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
302 	tagging_rule->ingress_port_mask = user_ports;
303 	tagging_rule->prio = 1;
304 	tagging_rule->id.cookie = ocelot->num_phys_ports;
305 	tagging_rule->id.tc_offload = false;
306 	tagging_rule->block_id = VCAP_IS1;
307 	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
308 	tagging_rule->lookup = 0;
309 	tagging_rule->action.pag_override_mask = 0xff;
310 	tagging_rule->action.pag_val = ocelot->num_phys_ports;
311 
312 	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
313 	if (ret) {
314 		kfree(tagging_rule);
315 		kfree(redirect_rule);
316 		return ret;
317 	}
318 
319 	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
320 	redirect_rule->ingress_port_mask = user_ports;
321 	redirect_rule->pag = ocelot->num_phys_ports;
322 	redirect_rule->prio = 1;
323 	redirect_rule->id.cookie = ocelot->num_phys_ports;
324 	redirect_rule->id.tc_offload = false;
325 	redirect_rule->block_id = VCAP_IS2;
326 	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
327 	redirect_rule->lookup = 0;
328 	redirect_rule->action.cpu_copy_ena = true;
329 	if (felix->info->quirk_no_xtr_irq) {
330 		/* Redirect to the tag_8021q CPU but also copy PTP packets to
331 		 * the CPU port module
332 		 */
333 		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
334 		redirect_rule->action.port_mask = BIT(cpu);
335 	} else {
336 		/* Trap PTP packets only to the CPU port module (which is
337 		 * redirected to the NPI port)
338 		 */
339 		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
340 		redirect_rule->action.port_mask = 0;
341 	}
342 
343 	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
344 	if (ret) {
345 		ocelot_vcap_filter_del(ocelot, tagging_rule);
346 		kfree(redirect_rule);
347 		return ret;
348 	}
349 
350 	/* The ownership of the CPU port module's queues might have just been
351 	 * transferred to the tag_8021q tagger from the NPI-based tagger.
352 	 * So there might still be all sorts of crap in the queues. On the
353 	 * other hand, the MMIO-based matching of PTP frames is very brittle,
354 	 * so we need to be careful that there are no extra frames to be
355 	 * dequeued over MMIO, since we would never know to discard them.
356 	 */
357 	ocelot_drain_cpu_queue(ocelot, 0);
358 
359 	return 0;
360 }
361 
362 static int felix_teardown_mmio_filtering(struct felix *felix)
363 {
364 	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
365 	struct ocelot_vcap_block *block_vcap_is1;
366 	struct ocelot_vcap_block *block_vcap_is2;
367 	struct ocelot *ocelot = &felix->ocelot;
368 	int err;
369 
370 	block_vcap_is1 = &ocelot->block[VCAP_IS1];
371 	block_vcap_is2 = &ocelot->block[VCAP_IS2];
372 
373 	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
374 							   ocelot->num_phys_ports,
375 							   false);
376 	if (!tagging_rule)
377 		return -ENOENT;
378 
379 	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
380 	if (err)
381 		return err;
382 
383 	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
384 							    ocelot->num_phys_ports,
385 							    false);
386 	if (!redirect_rule)
387 		return -ENOENT;
388 
389 	return ocelot_vcap_filter_del(ocelot, redirect_rule);
390 }
391 
392 static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
393 {
394 	struct ocelot *ocelot = ds->priv;
395 	struct felix *felix = ocelot_to_felix(ocelot);
396 	unsigned long cpu_flood;
397 	int port, err;
398 
399 	felix_8021q_cpu_port_init(ocelot, cpu);
400 
401 	for (port = 0; port < ds->num_ports; port++) {
402 		if (dsa_is_unused_port(ds, port))
403 			continue;
404 
405 		/* This overwrites ocelot_init():
406 		 * Do not forward BPDU frames to the CPU port module,
407 		 * for 2 reasons:
408 		 * - When these packets are injected from the tag_8021q
409 		 *   CPU port, we want them to go out, not loop back
410 		 *   into the system.
411 		 * - STP traffic ingressing on a user port should go to
412 		 *   the tag_8021q CPU port, not to the hardware CPU
413 		 *   port module.
414 		 */
415 		ocelot_write_gix(ocelot,
416 				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
417 				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
418 	}
419 
420 	/* In tag_8021q mode, the CPU port module is unused, except for PTP
421 	 * frames. So we want to disable flooding of any kind to the CPU port
422 	 * module, since packets going there will end in a black hole.
423 	 */
424 	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
425 	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
426 	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
427 	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
428 
429 	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
430 	if (err)
431 		return err;
432 
433 	err = felix_setup_mmio_filtering(felix);
434 	if (err)
435 		goto out_tag_8021q_unregister;
436 
437 	return 0;
438 
439 out_tag_8021q_unregister:
440 	dsa_tag_8021q_unregister(ds);
441 	return err;
442 }
443 
444 static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
445 {
446 	struct ocelot *ocelot = ds->priv;
447 	struct felix *felix = ocelot_to_felix(ocelot);
448 	int err, port;
449 
450 	err = felix_teardown_mmio_filtering(felix);
451 	if (err)
452 		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d\n",
453 			err);
454 
455 	dsa_tag_8021q_unregister(ds);
456 
457 	for (port = 0; port < ds->num_ports; port++) {
458 		if (dsa_is_unused_port(ds, port))
459 			continue;
460 
461 		/* Restore the logic from ocelot_init:
462 		 * do not forward BPDU frames to the front ports.
463 		 */
464 		ocelot_write_gix(ocelot,
465 				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
466 				 ANA_PORT_CPU_FWD_BPDU_CFG,
467 				 port);
468 	}
469 
470 	felix_8021q_cpu_port_deinit(ocelot, cpu);
471 }
472 
473 /* The CPU port module is connected to the Node Processor Interface (NPI). This
474  * is the mode through which frames can be injected from and extracted to an
475  * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
476  * running Linux, and this forms a DSA setup together with the enetc or fman
477  * DSA master.
478  */
479 static void felix_npi_port_init(struct ocelot *ocelot, int port)
480 {
481 	ocelot->npi = port;
482 
483 	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
484 		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
485 		     QSYS_EXT_CPU_CFG);
486 
487 	/* NPI port Injection/Extraction configuration */
488 	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
489 			    ocelot->npi_xtr_prefix);
490 	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
491 			    ocelot->npi_inj_prefix);
492 
493 	/* Disable transmission of pause frames */
494 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
495 }
496 
497 static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
498 {
499 	/* Restore hardware defaults */
500 	int unused_port = ocelot->num_phys_ports + 2;
501 
502 	ocelot->npi = -1;
503 
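	/* Point the external CPU port selection at a port number that does
	 * not exist, so that no frames get redirected to the NPI port
	 * anymore.
	 */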
504 	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
505 		     QSYS_EXT_CPU_CFG);
506 
507 	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
508 			    OCELOT_TAG_PREFIX_DISABLED);
509 	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
510 			    OCELOT_TAG_PREFIX_DISABLED);
511 
512 	/* Enable transmission of pause frames */
513 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
514 }
515 
516 static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
517 {
518 	struct ocelot *ocelot = ds->priv;
519 	unsigned long cpu_flood;
520 
521 	felix_npi_port_init(ocelot, cpu);
522 
523 	/* Include the CPU port module (and indirectly, the NPI port)
524 	 * in the forwarding mask for unknown unicast - the hardware
525 	 * default value for ANA_FLOODING_FLD_UNICAST excludes
526 	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
527 	 * since Ocelot relies on whitelisting MAC addresses towards
528 	 * PGID_CPU.
529 	 * We do this because DSA does not yet perform RX filtering,
530 	 * and the NPI port does not perform source address learning,
531 	 * so traffic sent to Linux is effectively unknown from the
532 	 * switch's perspective.
533 	 */
534 	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
535 	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
536 	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
537 	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
538 
539 	return 0;
540 }
541 
542 static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
543 {
544 	struct ocelot *ocelot = ds->priv;
545 
546 	felix_npi_port_deinit(ocelot, cpu);
547 }
548 
549 static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
550 				  enum dsa_tag_protocol proto)
551 {
552 	int err;
553 
554 	switch (proto) {
555 	case DSA_TAG_PROTO_SEVILLE:
556 	case DSA_TAG_PROTO_OCELOT:
557 		err = felix_setup_tag_npi(ds, cpu);
558 		break;
559 	case DSA_TAG_PROTO_OCELOT_8021Q:
560 		err = felix_setup_tag_8021q(ds, cpu);
561 		break;
562 	default:
563 		err = -EPROTONOSUPPORT;
564 	}
565 
566 	return err;
567 }
568 
569 static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
570 				   enum dsa_tag_protocol proto)
571 {
572 	switch (proto) {
573 	case DSA_TAG_PROTO_SEVILLE:
574 	case DSA_TAG_PROTO_OCELOT:
575 		felix_teardown_tag_npi(ds, cpu);
576 		break;
577 	case DSA_TAG_PROTO_OCELOT_8021Q:
578 		felix_teardown_tag_8021q(ds, cpu);
579 		break;
580 	default:
581 		break;
582 	}
583 }
584 
585 /* This always leaves the switch in a consistent state, because although the
586  * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
587  * or the restoration is guaranteed to work.
588  */
589 static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
590 				     enum dsa_tag_protocol proto)
591 {
592 	struct ocelot *ocelot = ds->priv;
593 	struct felix *felix = ocelot_to_felix(ocelot);
594 	enum dsa_tag_protocol old_proto = felix->tag_proto;
595 	int err;
596 
597 	if (proto != DSA_TAG_PROTO_SEVILLE &&
598 	    proto != DSA_TAG_PROTO_OCELOT &&
599 	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
600 		return -EPROTONOSUPPORT;
601 
602 	felix_del_tag_protocol(ds, cpu, old_proto);
603 
604 	err = felix_set_tag_protocol(ds, cpu, proto);
605 	if (err) {
606 		felix_set_tag_protocol(ds, cpu, old_proto);
607 		return err;
608 	}
609 
610 	felix->tag_proto = proto;
611 
612 	return 0;
613 }
614 
615 static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
616 						    int port,
617 						    enum dsa_tag_protocol mp)
618 {
619 	struct ocelot *ocelot = ds->priv;
620 	struct felix *felix = ocelot_to_felix(ocelot);
621 
622 	return felix->tag_proto;
623 }
624 
625 static int felix_set_ageing_time(struct dsa_switch *ds,
626 				 unsigned int ageing_time)
627 {
628 	struct ocelot *ocelot = ds->priv;
629 
630 	ocelot_set_ageing_time(ocelot, ageing_time);
631 
632 	return 0;
633 }
634 
635 static int felix_fdb_dump(struct dsa_switch *ds, int port,
636 			  dsa_fdb_dump_cb_t *cb, void *data)
637 {
638 	struct ocelot *ocelot = ds->priv;
639 
640 	return ocelot_fdb_dump(ocelot, port, cb, data);
641 }
642 
643 static int felix_fdb_add(struct dsa_switch *ds, int port,
644 			 const unsigned char *addr, u16 vid)
645 {
646 	struct ocelot *ocelot = ds->priv;
647 
648 	return ocelot_fdb_add(ocelot, port, addr, vid);
649 }
650 
651 static int felix_fdb_del(struct dsa_switch *ds, int port,
652 			 const unsigned char *addr, u16 vid)
653 {
654 	struct ocelot *ocelot = ds->priv;
655 
656 	return ocelot_fdb_del(ocelot, port, addr, vid);
657 }
658 
659 static int felix_mdb_add(struct dsa_switch *ds, int port,
660 			 const struct switchdev_obj_port_mdb *mdb)
661 {
662 	struct ocelot *ocelot = ds->priv;
663 
664 	return ocelot_port_mdb_add(ocelot, port, mdb);
665 }
666 
667 static int felix_mdb_del(struct dsa_switch *ds, int port,
668 			 const struct switchdev_obj_port_mdb *mdb)
669 {
670 	struct ocelot *ocelot = ds->priv;
671 
672 	return ocelot_port_mdb_del(ocelot, port, mdb);
673 }
674 
675 static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
676 				       u8 state)
677 {
678 	struct ocelot *ocelot = ds->priv;
679 
680 	return ocelot_bridge_stp_state_set(ocelot, port, state);
681 }
682 
683 static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
684 				  struct switchdev_brport_flags val,
685 				  struct netlink_ext_ack *extack)
686 {
687 	struct ocelot *ocelot = ds->priv;
688 
689 	return ocelot_port_pre_bridge_flags(ocelot, port, val);
690 }
691 
692 static int felix_bridge_flags(struct dsa_switch *ds, int port,
693 			      struct switchdev_brport_flags val,
694 			      struct netlink_ext_ack *extack)
695 {
696 	struct ocelot *ocelot = ds->priv;
697 
698 	ocelot_port_bridge_flags(ocelot, port, val);
699 
700 	return 0;
701 }
702 
703 static int felix_bridge_join(struct dsa_switch *ds, int port,
704 			     struct net_device *br)
705 {
706 	struct ocelot *ocelot = ds->priv;
707 
708 	ocelot_port_bridge_join(ocelot, port, br);
709 
710 	return 0;
711 }
712 
713 static void felix_bridge_leave(struct dsa_switch *ds, int port,
714 			       struct net_device *br)
715 {
716 	struct ocelot *ocelot = ds->priv;
717 
718 	ocelot_port_bridge_leave(ocelot, port, br);
719 }
720 
721 static int felix_lag_join(struct dsa_switch *ds, int port,
722 			  struct net_device *bond,
723 			  struct netdev_lag_upper_info *info)
724 {
725 	struct ocelot *ocelot = ds->priv;
726 
727 	return ocelot_port_lag_join(ocelot, port, bond, info);
728 }
729 
730 static int felix_lag_leave(struct dsa_switch *ds, int port,
731 			   struct net_device *bond)
732 {
733 	struct ocelot *ocelot = ds->priv;
734 
735 	ocelot_port_lag_leave(ocelot, port, bond);
736 
737 	return 0;
738 }
739 
740 static int felix_lag_change(struct dsa_switch *ds, int port)
741 {
742 	struct dsa_port *dp = dsa_to_port(ds, port);
743 	struct ocelot *ocelot = ds->priv;
744 
745 	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
746 
747 	return 0;
748 }
749 
750 static int felix_vlan_prepare(struct dsa_switch *ds, int port,
751 			      const struct switchdev_obj_port_vlan *vlan,
752 			      struct netlink_ext_ack *extack)
753 {
754 	struct ocelot *ocelot = ds->priv;
755 	u16 flags = vlan->flags;
756 
757 	/* Ocelot switches copy frames as-is to the CPU, so the flags:
758 	 * egress-untagged or not, pvid or not, make no difference. This
759 	 * behavior is already better than what DSA just tries to approximate
760 	 * when it installs the VLAN with the same flags on the CPU port.
761 	 * Just accept any configuration, and don't let ocelot deny installing
762 	 * multiple native VLANs on the NPI port, because the switch doesn't
763 	 * look at the port tag settings towards the NPI interface anyway.
764 	 */
765 	if (port == ocelot->npi)
766 		return 0;
767 
768 	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
769 				   flags & BRIDGE_VLAN_INFO_PVID,
770 				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
771 				   extack);
772 }
773 
774 static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
775 				struct netlink_ext_ack *extack)
776 {
777 	struct ocelot *ocelot = ds->priv;
778 
779 	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
780 }
781 
782 static int felix_vlan_add(struct dsa_switch *ds, int port,
783 			  const struct switchdev_obj_port_vlan *vlan,
784 			  struct netlink_ext_ack *extack)
785 {
786 	struct ocelot *ocelot = ds->priv;
787 	u16 flags = vlan->flags;
788 	int err;
789 
790 	err = felix_vlan_prepare(ds, port, vlan, extack);
791 	if (err)
792 		return err;
793 
794 	return ocelot_vlan_add(ocelot, port, vlan->vid,
795 			       flags & BRIDGE_VLAN_INFO_PVID,
796 			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
797 }
798 
799 static int felix_vlan_del(struct dsa_switch *ds, int port,
800 			  const struct switchdev_obj_port_vlan *vlan)
801 {
802 	struct ocelot *ocelot = ds->priv;
803 
804 	return ocelot_vlan_del(ocelot, port, vlan->vid);
805 }
806 
807 static void felix_phylink_validate(struct dsa_switch *ds, int port,
808 				   unsigned long *supported,
809 				   struct phylink_link_state *state)
810 {
811 	struct ocelot *ocelot = ds->priv;
812 	struct felix *felix = ocelot_to_felix(ocelot);
813 
814 	if (felix->info->phylink_validate)
815 		felix->info->phylink_validate(ocelot, port, supported, state);
816 }
817 
818 static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
819 				     unsigned int link_an_mode,
820 				     const struct phylink_link_state *state)
821 {
822 	struct ocelot *ocelot = ds->priv;
823 	struct felix *felix = ocelot_to_felix(ocelot);
824 	struct dsa_port *dp = dsa_to_port(ds, port);
825 
826 	if (felix->pcs[port])
827 		phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
828 }
829 
830 static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
831 					unsigned int link_an_mode,
832 					phy_interface_t interface)
833 {
834 	struct ocelot *ocelot = ds->priv;
835 
836 	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
837 				     FELIX_MAC_QUIRKS);
838 }
839 
840 static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
841 				      unsigned int link_an_mode,
842 				      phy_interface_t interface,
843 				      struct phy_device *phydev,
844 				      int speed, int duplex,
845 				      bool tx_pause, bool rx_pause)
846 {
847 	struct ocelot *ocelot = ds->priv;
848 	struct felix *felix = ocelot_to_felix(ocelot);
849 
850 	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
851 				   interface, speed, duplex, tx_pause, rx_pause,
852 				   FELIX_MAC_QUIRKS);
853 
854 	if (felix->info->port_sched_speed_set)
855 		felix->info->port_sched_speed_set(ocelot, port, speed);
856 }
857 
858 static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
859 {
860 	int i;
861 
862 	ocelot_rmw_gix(ocelot,
863 		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
864 		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
865 		       ANA_PORT_QOS_CFG,
866 		       port);
867 
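	/* Program one mapping entry per (DEI, PCP) combination of the VLAN
	 * tag, selecting the QoS class and drop precedence for the frame.
	 */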
868 	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
869 		ocelot_rmw_ix(ocelot,
870 			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
871 			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
872 			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
873 			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
874 			      ANA_PORT_PCP_DEI_MAP,
875 			      port, i);
876 	}
877 }
878 
879 static void felix_get_strings(struct dsa_switch *ds, int port,
880 			      u32 stringset, u8 *data)
881 {
882 	struct ocelot *ocelot = ds->priv;
883 
884 	return ocelot_get_strings(ocelot, port, stringset, data);
885 }
886 
887 static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
888 {
889 	struct ocelot *ocelot = ds->priv;
890 
891 	ocelot_get_ethtool_stats(ocelot, port, data);
892 }
893 
894 static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
895 {
896 	struct ocelot *ocelot = ds->priv;
897 
898 	return ocelot_get_sset_count(ocelot, port, sset);
899 }
900 
901 static int felix_get_ts_info(struct dsa_switch *ds, int port,
902 			     struct ethtool_ts_info *info)
903 {
904 	struct ocelot *ocelot = ds->priv;
905 
906 	return ocelot_get_ts_info(ocelot, port, info);
907 }
908 
909 static int felix_parse_ports_node(struct felix *felix,
910 				  struct device_node *ports_node,
911 				  phy_interface_t *port_phy_modes)
912 {
913 	struct ocelot *ocelot = &felix->ocelot;
914 	struct device *dev = felix->ocelot.dev;
915 	struct device_node *child;
916 
917 	for_each_available_child_of_node(ports_node, child) {
918 		phy_interface_t phy_mode;
919 		u32 port;
920 		int err;
921 
922 		/* Get switch port number from DT */
923 		if (of_property_read_u32(child, "reg", &port) < 0) {
924 			dev_err(dev, "Port number not defined in device tree "
925 				"(property \"reg\")\n");
926 			of_node_put(child);
927 			return -ENODEV;
928 		}
929 
930 		/* Get PHY mode from DT */
931 		err = of_get_phy_mode(child, &phy_mode);
932 		if (err) {
933 			dev_err(dev, "Failed to read phy-mode or "
934 				"phy-interface-type property for port %d\n",
935 				port);
936 			of_node_put(child);
937 			return -ENODEV;
938 		}
939 
940 		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
941 		if (err < 0) {
942 			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
943 				phy_modes(phy_mode), port);
944 			of_node_put(child);
945 			return err;
946 		}
947 
948 		port_phy_modes[port] = phy_mode;
949 	}
950 
951 	return 0;
952 }
953 
954 static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
955 {
956 	struct device *dev = felix->ocelot.dev;
957 	struct device_node *switch_node;
958 	struct device_node *ports_node;
959 	int err;
960 
961 	switch_node = dev->of_node;
962 
963 	ports_node = of_get_child_by_name(switch_node, "ports");
964 	if (!ports_node) {
965 		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
966 		return -ENODEV;
967 	}
968 
969 	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
970 	of_node_put(ports_node);
971 
972 	return err;
973 }
974 
975 static int felix_init_structs(struct felix *felix, int num_phys_ports)
976 {
977 	struct ocelot *ocelot = &felix->ocelot;
978 	phy_interface_t *port_phy_modes;
979 	struct resource res;
980 	int port, i, err;
981 
982 	ocelot->num_phys_ports = num_phys_ports;
983 	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
984 				     sizeof(struct ocelot_port *), GFP_KERNEL);
985 	if (!ocelot->ports)
986 		return -ENOMEM;
987 
988 	ocelot->map		= felix->info->map;
989 	ocelot->stats_layout	= felix->info->stats_layout;
990 	ocelot->num_stats	= felix->info->num_stats;
991 	ocelot->num_mact_rows	= felix->info->num_mact_rows;
992 	ocelot->vcap		= felix->info->vcap;
993 	ocelot->ops		= felix->info->ops;
994 	ocelot->npi_inj_prefix	= OCELOT_TAG_PREFIX_SHORT;
995 	ocelot->npi_xtr_prefix	= OCELOT_TAG_PREFIX_SHORT;
996 	ocelot->devlink		= felix->ds->devlink;
997 
998 	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
999 				 GFP_KERNEL);
1000 	if (!port_phy_modes)
1001 		return -ENOMEM;
1002 
1003 	err = felix_parse_dt(felix, port_phy_modes);
1004 	if (err) {
1005 		kfree(port_phy_modes);
1006 		return err;
1007 	}
1008 
1009 	for (i = 0; i < TARGET_MAX; i++) {
1010 		struct regmap *target;
1011 
1012 		if (!felix->info->target_io_res[i].name)
1013 			continue;
1014 
1015 		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
1016 		res.flags = IORESOURCE_MEM;
1017 		res.start += felix->switch_base;
1018 		res.end += felix->switch_base;
1019 
1020 		target = ocelot_regmap_init(ocelot, &res);
1021 		if (IS_ERR(target)) {
1022 			dev_err(ocelot->dev,
1023 				"Failed to map device memory space\n");
1024 			kfree(port_phy_modes);
1025 			return PTR_ERR(target);
1026 		}
1027 
1028 		ocelot->targets[i] = target;
1029 	}
1030 
1031 	err = ocelot_regfields_init(ocelot, felix->info->regfields);
1032 	if (err) {
1033 		dev_err(ocelot->dev, "failed to init reg fields map\n");
1034 		kfree(port_phy_modes);
1035 		return err;
1036 	}
1037 
1038 	for (port = 0; port < num_phys_ports; port++) {
1039 		struct ocelot_port *ocelot_port;
1040 		struct regmap *target;
1041 
1042 		ocelot_port = devm_kzalloc(ocelot->dev,
1043 					   sizeof(struct ocelot_port),
1044 					   GFP_KERNEL);
1045 		if (!ocelot_port) {
1046 			dev_err(ocelot->dev,
1047 				"failed to allocate port memory\n");
1048 			kfree(port_phy_modes);
1049 			return -ENOMEM;
1050 		}
1051 
1052 		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
1053 		res.flags = IORESOURCE_MEM;
1054 		res.start += felix->switch_base;
1055 		res.end += felix->switch_base;
1056 
1057 		target = ocelot_regmap_init(ocelot, &res);
1058 		if (IS_ERR(target)) {
1059 			dev_err(ocelot->dev,
1060 				"Failed to map memory space for port %d\n",
1061 				port);
1062 			kfree(port_phy_modes);
1063 			return PTR_ERR(target);
1064 		}
1065 
1066 		ocelot_port->phy_mode = port_phy_modes[port];
1067 		ocelot_port->ocelot = ocelot;
1068 		ocelot_port->target = target;
1069 		ocelot->ports[port] = ocelot_port;
1070 	}
1071 
1072 	kfree(port_phy_modes);
1073 
1074 	if (felix->info->mdio_bus_alloc) {
1075 		err = felix->info->mdio_bus_alloc(ocelot);
1076 		if (err < 0)
1077 			return err;
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
1084 					   struct sk_buff *skb)
1085 {
1086 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1087 	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
1088 	struct sk_buff *skb_match = NULL, *skb_tmp;
1089 	unsigned long flags;
1090 
1091 	if (!clone)
1092 		return;
1093 
1094 	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
1095 
1096 	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
1097 		if (skb != clone)
1098 			continue;
1099 		__skb_unlink(skb, &ocelot_port->tx_skbs);
1100 		skb_match = skb;
1101 		break;
1102 	}
1103 
1104 	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
1105 
1106 	WARN_ONCE(!skb_match,
1107 		  "Could not find skb clone in TX timestamping list\n");
1108 }
1109 
1110 #define work_to_xmit_work(w) \
1111 		container_of((w), struct felix_deferred_xmit_work, work)
1112 
1113 static void felix_port_deferred_xmit(struct kthread_work *work)
1114 {
1115 	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
1116 	struct dsa_switch *ds = xmit_work->dp->ds;
1117 	struct sk_buff *skb = xmit_work->skb;
1118 	u32 rew_op = ocelot_ptp_rew_op(skb);
1119 	struct ocelot *ocelot = ds->priv;
1120 	int port = xmit_work->dp->index;
1121 	int retries = 10;
1122 
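	/* Busy-wait (bounded by the retry count) for the CPU port module's
	 * injection group to have room for this frame.
	 */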
1123 	do {
1124 		if (ocelot_can_inject(ocelot, 0))
1125 			break;
1126 
1127 		cpu_relax();
1128 	} while (--retries);
1129 
1130 	if (!retries) {
1131 		dev_err(ocelot->dev, "port %d failed to inject skb\n",
1132 			port);
1133 		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
1134 		kfree_skb(skb);
1135 		return;
1136 	}
1137 
1138 	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
1139 
1140 	consume_skb(skb);
1141 	kfree(xmit_work);
1142 }
1143 
1144 static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
1145 {
1146 	struct dsa_port *dp = dsa_to_port(ds, port);
1147 	struct ocelot *ocelot = ds->priv;
1148 	struct felix *felix = ocelot_to_felix(ocelot);
1149 	struct felix_port *felix_port;
1150 
1151 	if (!dsa_port_is_user(dp))
1152 		return 0;
1153 
1154 	felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
1155 	if (!felix_port)
1156 		return -ENOMEM;
1157 
1158 	felix_port->xmit_worker = felix->xmit_worker;
1159 	felix_port->xmit_work_fn = felix_port_deferred_xmit;
1160 
1161 	dp->priv = felix_port;
1162 
1163 	return 0;
1164 }
1165 
1166 static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
1167 {
1168 	struct dsa_port *dp = dsa_to_port(ds, port);
1169 	struct felix_port *felix_port = dp->priv;
1170 
1171 	if (!felix_port)
1172 		return;
1173 
1174 	dp->priv = NULL;
1175 	kfree(felix_port);
1176 }
1177 
1178 /* Hardware initialization done here so that we can allocate structures with
1179  * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
1180  * us to allocate structures twice (leak memory) and map PCI memory twice
1181  * (which will not work).
1182  */
1183 static int felix_setup(struct dsa_switch *ds)
1184 {
1185 	struct ocelot *ocelot = ds->priv;
1186 	struct felix *felix = ocelot_to_felix(ocelot);
1187 	int port, err;
1188 
1189 	err = felix_init_structs(felix, ds->num_ports);
1190 	if (err)
1191 		return err;
1192 
1193 	err = ocelot_init(ocelot);
1194 	if (err)
1195 		goto out_mdiobus_free;
1196 
1197 	if (ocelot->ptp) {
1198 		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
1199 		if (err) {
1200 			dev_err(ocelot->dev,
1201 				"Timestamp initialization failed\n");
1202 			ocelot->ptp = 0;
1203 		}
1204 	}
1205 
1206 	felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
1207 	if (IS_ERR(felix->xmit_worker)) {
1208 		err = PTR_ERR(felix->xmit_worker);
1209 		goto out_deinit_timestamp;
1210 	}
1211 
1212 	for (port = 0; port < ds->num_ports; port++) {
1213 		if (dsa_is_unused_port(ds, port))
1214 			continue;
1215 
1216 		ocelot_init_port(ocelot, port);
1217 
1218 		/* Set the default QoS Classification based on PCP and DEI
1219 		 * bits of vlan tag.
1220 		 */
1221 		felix_port_qos_map_init(ocelot, port);
1222 
1223 		err = felix_port_setup_tagger_data(ds, port);
1224 		if (err) {
1225 			dev_err(ds->dev,
1226 				"port %d failed to set up tagger data: %pe\n",
1227 				port, ERR_PTR(err));
1228 			goto out_deinit_ports;
1229 		}
1230 	}
1231 
1232 	err = ocelot_devlink_sb_register(ocelot);
1233 	if (err)
1234 		goto out_deinit_ports;
1235 
1236 	for (port = 0; port < ds->num_ports; port++) {
1237 		if (!dsa_is_cpu_port(ds, port))
1238 			continue;
1239 
1240 		/* The initial tag protocol is NPI which always returns 0, so
1241 		 * there's no real point in checking for errors.
1242 		 */
1243 		felix_set_tag_protocol(ds, port, felix->tag_proto);
1244 		break;
1245 	}
1246 
1247 	ds->mtu_enforcement_ingress = true;
1248 	ds->assisted_learning_on_cpu_port = true;
1249 
1250 	return 0;
1251 
1252 out_deinit_ports:
1253 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1254 		if (dsa_is_unused_port(ds, port))
1255 			continue;
1256 
1257 		felix_port_teardown_tagger_data(ds, port);
1258 		ocelot_deinit_port(ocelot, port);
1259 	}
1260 
1261 	kthread_destroy_worker(felix->xmit_worker);
1262 
1263 out_deinit_timestamp:
1264 	ocelot_deinit_timestamp(ocelot);
1265 	ocelot_deinit(ocelot);
1266 
1267 out_mdiobus_free:
1268 	if (felix->info->mdio_bus_free)
1269 		felix->info->mdio_bus_free(ocelot);
1270 
1271 	return err;
1272 }
1273 
1274 static void felix_teardown(struct dsa_switch *ds)
1275 {
1276 	struct ocelot *ocelot = ds->priv;
1277 	struct felix *felix = ocelot_to_felix(ocelot);
1278 	int port;
1279 
1280 	for (port = 0; port < ds->num_ports; port++) {
1281 		if (!dsa_is_cpu_port(ds, port))
1282 			continue;
1283 
1284 		felix_del_tag_protocol(ds, port, felix->tag_proto);
1285 		break;
1286 	}
1287 
1288 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1289 		if (dsa_is_unused_port(ds, port))
1290 			continue;
1291 
1292 		felix_port_teardown_tagger_data(ds, port);
1293 		ocelot_deinit_port(ocelot, port);
1294 	}
1295 
1296 	kthread_destroy_worker(felix->xmit_worker);
1297 
1298 	ocelot_devlink_sb_unregister(ocelot);
1299 	ocelot_deinit_timestamp(ocelot);
1300 	ocelot_deinit(ocelot);
1301 
1302 	if (felix->info->mdio_bus_free)
1303 		felix->info->mdio_bus_free(ocelot);
1304 }
1305 
1306 static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
1307 			      struct ifreq *ifr)
1308 {
1309 	struct ocelot *ocelot = ds->priv;
1310 
1311 	return ocelot_hwstamp_get(ocelot, port, ifr);
1312 }
1313 
1314 static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
1315 			      struct ifreq *ifr)
1316 {
1317 	struct ocelot *ocelot = ds->priv;
1318 
1319 	return ocelot_hwstamp_set(ocelot, port, ifr);
1320 }
1321 
1322 static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
1323 {
1324 	struct felix *felix = ocelot_to_felix(ocelot);
1325 	int err, grp = 0;
1326 
1327 	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
1328 		return false;
1329 
1330 	if (!felix->info->quirk_no_xtr_irq)
1331 		return false;
1332 
1333 	if (ptp_type == PTP_CLASS_NONE)
1334 		return false;
1335 
1336 	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
1337 		struct sk_buff *skb;
1338 		unsigned int type;
1339 
1340 		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
1341 		if (err)
1342 			goto out;
1343 
1344 		/* We trap to the CPU port module all PTP frames, but
1345 		 * felix_rxtstamp() only gets called for event frames.
1346 		 * So we need to avoid sending duplicate general
1347 		 * message frames by running a second BPF classifier
1348 		 * here and dropping those.
1349 		 */
1350 		__skb_push(skb, ETH_HLEN);
1351 
1352 		type = ptp_classify_raw(skb);
1353 
1354 		__skb_pull(skb, ETH_HLEN);
1355 
1356 		if (type == PTP_CLASS_NONE) {
1357 			kfree_skb(skb);
1358 			continue;
1359 		}
1360 
1361 		netif_rx(skb);
1362 	}
1363 
1364 out:
1365 	if (err < 0)
1366 		ocelot_drain_cpu_queue(ocelot, 0);
1367 
1368 	return true;
1369 }
1370 
1371 static bool felix_rxtstamp(struct dsa_switch *ds, int port,
1372 			   struct sk_buff *skb, unsigned int type)
1373 {
1374 	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
1375 	struct skb_shared_hwtstamps *shhwtstamps;
1376 	struct ocelot *ocelot = ds->priv;
1377 	struct timespec64 ts;
1378 	u32 tstamp_hi;
1379 	u64 tstamp;
1380 
1381 	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
1382 	 * for RX timestamping. Then free it, and poll for its copy through
1383 	 * MMIO in the CPU port module, and inject that into the stack from
1384 	 * ocelot_xtr_poll().
1385 	 */
1386 	if (felix_check_xtr_pkt(ocelot, type)) {
1387 		kfree_skb(skb);
1388 		return true;
1389 	}
1390 
1391 	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
1392 	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
1393 
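	/* The extraction header contains only the lower 32 bits of the RX
	 * timestamp, so reconstruct the upper bits from the current PTP time,
	 * compensating for a rollover of the lower word that may have
	 * occurred in between.
	 */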
1394 	tstamp_hi = tstamp >> 32;
1395 	if ((tstamp & 0xffffffff) < tstamp_lo)
1396 		tstamp_hi--;
1397 
1398 	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
1399 
1400 	shhwtstamps = skb_hwtstamps(skb);
1401 	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1402 	shhwtstamps->hwtstamp = tstamp;
1403 	return false;
1404 }
1405 
1406 static void felix_txtstamp(struct dsa_switch *ds, int port,
1407 			   struct sk_buff *skb)
1408 {
1409 	struct ocelot *ocelot = ds->priv;
1410 	struct sk_buff *clone = NULL;
1411 
1412 	if (!ocelot->ptp)
1413 		return;
1414 
1415 	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
1416 		dev_err_ratelimited(ds->dev,
1417 				    "port %d delivering skb without TX timestamp\n",
1418 				    port);
1419 		return;
1420 	}
1421 
1422 	if (clone)
1423 		OCELOT_SKB_CB(skb)->clone = clone;
1424 }
1425 
1426 static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1427 {
1428 	struct ocelot *ocelot = ds->priv;
1429 
1430 	ocelot_port_set_maxlen(ocelot, port, new_mtu);
1431 
1432 	return 0;
1433 }
1434 
1435 static int felix_get_max_mtu(struct dsa_switch *ds, int port)
1436 {
1437 	struct ocelot *ocelot = ds->priv;
1438 
1439 	return ocelot_get_max_mtu(ocelot, port);
1440 }
1441 
1442 static int felix_cls_flower_add(struct dsa_switch *ds, int port,
1443 				struct flow_cls_offload *cls, bool ingress)
1444 {
1445 	struct ocelot *ocelot = ds->priv;
1446 
1447 	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
1448 }
1449 
1450 static int felix_cls_flower_del(struct dsa_switch *ds, int port,
1451 				struct flow_cls_offload *cls, bool ingress)
1452 {
1453 	struct ocelot *ocelot = ds->priv;
1454 
1455 	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
1456 }
1457 
1458 static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
1459 				  struct flow_cls_offload *cls, bool ingress)
1460 {
1461 	struct ocelot *ocelot = ds->priv;
1462 
1463 	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
1464 }
1465 
1466 static int felix_port_policer_add(struct dsa_switch *ds, int port,
1467 				  struct dsa_mall_policer_tc_entry *policer)
1468 {
1469 	struct ocelot *ocelot = ds->priv;
1470 	struct ocelot_policer pol = {
1471 		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
1472 		.burst = policer->burst,
1473 	};
1474 
1475 	return ocelot_port_policer_add(ocelot, port, &pol);
1476 }
1477 
1478 static void felix_port_policer_del(struct dsa_switch *ds, int port)
1479 {
1480 	struct ocelot *ocelot = ds->priv;
1481 
1482 	ocelot_port_policer_del(ocelot, port);
1483 }
1484 
1485 static int felix_port_setup_tc(struct dsa_switch *ds, int port,
1486 			       enum tc_setup_type type,
1487 			       void *type_data)
1488 {
1489 	struct ocelot *ocelot = ds->priv;
1490 	struct felix *felix = ocelot_to_felix(ocelot);
1491 
1492 	if (felix->info->port_setup_tc)
1493 		return felix->info->port_setup_tc(ds, port, type, type_data);
1494 	else
1495 		return -EOPNOTSUPP;
1496 }
1497 
1498 static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
1499 			     u16 pool_index,
1500 			     struct devlink_sb_pool_info *pool_info)
1501 {
1502 	struct ocelot *ocelot = ds->priv;
1503 
1504 	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
1505 }
1506 
1507 static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
1508 			     u16 pool_index, u32 size,
1509 			     enum devlink_sb_threshold_type threshold_type,
1510 			     struct netlink_ext_ack *extack)
1511 {
1512 	struct ocelot *ocelot = ds->priv;
1513 
1514 	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
1515 				  threshold_type, extack);
1516 }
1517 
1518 static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
1519 				  unsigned int sb_index, u16 pool_index,
1520 				  u32 *p_threshold)
1521 {
1522 	struct ocelot *ocelot = ds->priv;
1523 
1524 	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
1525 				       p_threshold);
1526 }
1527 
1528 static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
1529 				  unsigned int sb_index, u16 pool_index,
1530 				  u32 threshold, struct netlink_ext_ack *extack)
1531 {
1532 	struct ocelot *ocelot = ds->priv;
1533 
1534 	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
1535 				       threshold, extack);
1536 }
1537 
1538 static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
1539 				     unsigned int sb_index, u16 tc_index,
1540 				     enum devlink_sb_pool_type pool_type,
1541 				     u16 *p_pool_index, u32 *p_threshold)
1542 {
1543 	struct ocelot *ocelot = ds->priv;
1544 
1545 	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
1546 					  pool_type, p_pool_index,
1547 					  p_threshold);
1548 }
1549 
1550 static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
1551 				     unsigned int sb_index, u16 tc_index,
1552 				     enum devlink_sb_pool_type pool_type,
1553 				     u16 pool_index, u32 threshold,
1554 				     struct netlink_ext_ack *extack)
1555 {
1556 	struct ocelot *ocelot = ds->priv;
1557 
1558 	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
1559 					  pool_type, pool_index, threshold,
1560 					  extack);
1561 }
1562 
1563 static int felix_sb_occ_snapshot(struct dsa_switch *ds,
1564 				 unsigned int sb_index)
1565 {
1566 	struct ocelot *ocelot = ds->priv;
1567 
1568 	return ocelot_sb_occ_snapshot(ocelot, sb_index);
1569 }
1570 
1571 static int felix_sb_occ_max_clear(struct dsa_switch *ds,
1572 				  unsigned int sb_index)
1573 {
1574 	struct ocelot *ocelot = ds->priv;
1575 
1576 	return ocelot_sb_occ_max_clear(ocelot, sb_index);
1577 }
1578 
1579 static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
1580 				      unsigned int sb_index, u16 pool_index,
1581 				      u32 *p_cur, u32 *p_max)
1582 {
1583 	struct ocelot *ocelot = ds->priv;
1584 
1585 	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
1586 					   p_cur, p_max);
1587 }
1588 
1589 static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
1590 					 unsigned int sb_index, u16 tc_index,
1591 					 enum devlink_sb_pool_type pool_type,
1592 					 u32 *p_cur, u32 *p_max)
1593 {
1594 	struct ocelot *ocelot = ds->priv;
1595 
1596 	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
1597 					      pool_type, p_cur, p_max);
1598 }
1599 
1600 static int felix_mrp_add(struct dsa_switch *ds, int port,
1601 			 const struct switchdev_obj_mrp *mrp)
1602 {
1603 	struct ocelot *ocelot = ds->priv;
1604 
1605 	return ocelot_mrp_add(ocelot, port, mrp);
1606 }
1607 
1608 static int felix_mrp_del(struct dsa_switch *ds, int port,
1609 			 const struct switchdev_obj_mrp *mrp)
1610 {
1611 	struct ocelot *ocelot = ds->priv;
1612 
1613 	return ocelot_mrp_del(ocelot, port, mrp);
1614 }
1615 
1616 static int
1617 felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
1618 			const struct switchdev_obj_ring_role_mrp *mrp)
1619 {
1620 	struct ocelot *ocelot = ds->priv;
1621 
1622 	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
1623 }
1624 
1625 static int
1626 felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
1627 			const struct switchdev_obj_ring_role_mrp *mrp)
1628 {
1629 	struct ocelot *ocelot = ds->priv;
1630 
1631 	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
1632 }
1633 
1634 const struct dsa_switch_ops felix_switch_ops = {
1635 	.get_tag_protocol		= felix_get_tag_protocol,
1636 	.change_tag_protocol		= felix_change_tag_protocol,
1637 	.setup				= felix_setup,
1638 	.teardown			= felix_teardown,
1639 	.set_ageing_time		= felix_set_ageing_time,
1640 	.get_strings			= felix_get_strings,
1641 	.get_ethtool_stats		= felix_get_ethtool_stats,
1642 	.get_sset_count			= felix_get_sset_count,
1643 	.get_ts_info			= felix_get_ts_info,
1644 	.phylink_validate		= felix_phylink_validate,
1645 	.phylink_mac_config		= felix_phylink_mac_config,
1646 	.phylink_mac_link_down		= felix_phylink_mac_link_down,
1647 	.phylink_mac_link_up		= felix_phylink_mac_link_up,
1648 	.port_fdb_dump			= felix_fdb_dump,
1649 	.port_fdb_add			= felix_fdb_add,
1650 	.port_fdb_del			= felix_fdb_del,
1651 	.port_mdb_add			= felix_mdb_add,
1652 	.port_mdb_del			= felix_mdb_del,
1653 	.port_pre_bridge_flags		= felix_pre_bridge_flags,
1654 	.port_bridge_flags		= felix_bridge_flags,
1655 	.port_bridge_join		= felix_bridge_join,
1656 	.port_bridge_leave		= felix_bridge_leave,
1657 	.port_lag_join			= felix_lag_join,
1658 	.port_lag_leave			= felix_lag_leave,
1659 	.port_lag_change		= felix_lag_change,
1660 	.port_stp_state_set		= felix_bridge_stp_state_set,
1661 	.port_vlan_filtering		= felix_vlan_filtering,
1662 	.port_vlan_add			= felix_vlan_add,
1663 	.port_vlan_del			= felix_vlan_del,
1664 	.port_hwtstamp_get		= felix_hwtstamp_get,
1665 	.port_hwtstamp_set		= felix_hwtstamp_set,
1666 	.port_rxtstamp			= felix_rxtstamp,
1667 	.port_txtstamp			= felix_txtstamp,
1668 	.port_change_mtu		= felix_change_mtu,
1669 	.port_max_mtu			= felix_get_max_mtu,
1670 	.port_policer_add		= felix_port_policer_add,
1671 	.port_policer_del		= felix_port_policer_del,
1672 	.cls_flower_add			= felix_cls_flower_add,
1673 	.cls_flower_del			= felix_cls_flower_del,
1674 	.cls_flower_stats		= felix_cls_flower_stats,
1675 	.port_setup_tc			= felix_port_setup_tc,
1676 	.devlink_sb_pool_get		= felix_sb_pool_get,
1677 	.devlink_sb_pool_set		= felix_sb_pool_set,
1678 	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
1679 	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
1680 	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
1681 	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
1682 	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
1683 	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
1684 	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
1685 	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
1686 	.port_mrp_add			= felix_mrp_add,
1687 	.port_mrp_del			= felix_mrp_del,
1688 	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
1689 	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
1690 	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
1691 	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
1692 };
1693 
1694 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
1695 {
1696 	struct felix *felix = ocelot_to_felix(ocelot);
1697 	struct dsa_switch *ds = felix->ds;
1698 
1699 	if (!dsa_is_user_port(ds, port))
1700 		return NULL;
1701 
1702 	return dsa_to_port(ds, port)->slave;
1703 }
1704 
1705 int felix_netdev_to_port(struct net_device *dev)
1706 {
1707 	struct dsa_port *dp;
1708 
1709 	dp = dsa_port_from_netdev(dev);
1710 	if (IS_ERR(dp))
1711 		return -EINVAL;
1712 
1713 	return dp->index;
1714 }
1715