// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		/* flush all VLANs */
		.vid = 0,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	dsa_port_notify_bridge_fdb_flush(dp);
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

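/**
 * dsa_port_supports_hwtstamp - check whether a port can do hardware timestamping
 * @dp: port to check
 * @ifr: scratch ifreq, may be clobbered by the probe below
 *
 * Returns true if the switch driver implements both the get and set
 * hwtstamp operations and the get operation does not report -EOPNOTSUPP
 * for this port.
 */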
bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
		return false;

	/* "See through" shim implementations of the "get" method.
	 * This will clobber the ifreq structure, but we will either return an
	 * error, or the master will overwrite it with proper values.
	 */
	err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
	return err != -EOPNOTSUPP;
}

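/**
 * dsa_port_set_state - program a new STP state into the hardware
 * @dp: port on which to apply the new state
 * @state: bridge port state, one of the BR_STATE_* values
 * @do_fast_age: whether to flush dynamic FDB entries when leaving a
 *	state in which the port was learning
 *
 * When moving from the Learning or Forwarding state to the Disabled,
 * Blocking or Listening state, the port's forwarding database is
 * fast-aged so that stale entries do not linger, either because learning
 * cannot be turned off in hardware or because the caller requested it.
 */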
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

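/* Variant of dsa_port_enable() which does not take the rtnl lock itself;
 * dsa_port_enable() below is simply an rtnl_lock()/rtnl_unlock() wrapper
 * around this function.
 */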
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need;
	 * for example, when a port leaves a LAG that offloads the bridge, it
	 * becomes standalone, but as far as the bridge is concerned, no port
	 * ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The bridge layer has put the port in BR_STATE_DISABLED on leave,
	 * so set it back to BR_STATE_FORWARDING to keep it functional in
	 * standalone mode.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
					     struct net_device *bridge_dev)
{
	int bridge_num = dp->bridge_num;
	struct dsa_switch *ds = dp->ds;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
		return;

	dp->bridge_num = -1;

	dsa_bridge_num_put(bridge_dev, bridge_num);

	/* Notify the chips only once the offload has been deactivated, so
	 * that they can update their configuration accordingly.
	 */
	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
					      bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev)
{
	struct dsa_switch *ds = dp->ds;
	int bridge_num, err;

	if (!ds->ops->port_bridge_tx_fwd_offload)
		return false;

	bridge_num = dsa_bridge_num_get(bridge_dev,
					ds->num_fwd_offloading_bridges);
	if (bridge_num < 0)
		return false;

	dp->bridge_num = bridge_num;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);
	if (err) {
		dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
		return false;
	}

	return true;
}

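/**
 * dsa_port_bridge_join - offload bridging of a port to its switch
 * @dp: port joining the bridge
 * @br: bridge net device being joined
 * @extack: netlink extended ack for error reporting
 *
 * Notifies the whole switching fabric of the join, enables TX forwarding
 * offload where the driver supports it, registers the switchdev notifiers
 * for the bridge port, and finally syncs the bridge attributes (brport
 * flags, STP state, VLAN filtering, ageing time) to the hardware. Each
 * step is rolled back on failure.
 */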
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	bool tx_fwd_offload;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	dsa_port_bridge_tx_fwd_unoffload(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

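/* Joining a LAG first registers the LAG device with the DSA tree and
 * notifies the fabric; if the LAG itself already has a bridge upper, the
 * port joins that bridge as well, through the LAG.
 */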
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	if (dp->bridge_dev)
		dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device or the VID is not found; 0 means success,
			 * which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

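/* Change the VLAN awareness of a port. On switches where VLAN filtering
 * is a global setting, the change is validated against, and propagated to,
 * all user ports of the switch; otherwise it is applied per port. Any
 * failure restores the old state.
 */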
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		int port;

		ds->vlan_filtering = vlan_filtering;

		for (port = 0; port < ds->num_ports; port++) {
			struct net_device *slave;

			if (!dsa_is_user_port(ds, port))
				continue;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			slave = dsa_to_port(ds, port)->slave;
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

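/* The bridge expresses the FDB ageing time in clock_t; convert it to
 * milliseconds before notifying the switches in the tree, and cache the
 * value that was actually applied.
 */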
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

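/* The FDB add/del paths do not touch the hardware directly; they pass the
 * address through the switching fabric, so that cross-chip setups can keep
 * their forwarding databases in sync.
 */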
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

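/* Host FDB entries are addresses which must be sent to the CPU. Besides
 * notifying the fabric, also mirror the address into the unicast filter of
 * the master netdevice, which is the device that will actually receive
 * those packets.
 */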
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_add(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

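/* Install the tagging protocol operations on a CPU port. The rcv hook is
 * cached separately in the dsa_port, as it is dereferenced for every
 * received packet.
 */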
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

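/* Bind a port to its link, as described in the device tree. Drivers which
 * do not implement the legacy adjust_link callback go through phylink;
 * the others fall back to the deprecated PHYLIB paths (fixed link or
 * plain phy-handle) and trigger a warning.
 */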
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		of_node_put(phy_np);
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

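/* tag_8021q VLANs are installed by the tagging protocol code rather than
 * by the bridge. The broadcast argument chooses between notifying all
 * switch trees (dsa_broadcast) and only the tree this port belongs to
 * (dsa_port_notify).
 */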
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}