1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <net/switchdev.h>
25 #include <net/pkt_cls.h>
26 #include <net/tc_act/tc_mirred.h>
27 #include <net/netevent.h>
28 #include <net/tc_act/tc_sample.h>
29 #include <net/addrconf.h>
30
31 #include "spectrum.h"
32 #include "pci.h"
33 #include "core.h"
34 #include "reg.h"
35 #include "port.h"
36 #include "trap.h"
37 #include "txheader.h"
38 #include "spectrum_cnt.h"
39 #include "spectrum_dpipe.h"
40 #include "spectrum_acl_flex_actions.h"
41 #include "spectrum_span.h"
42 #include "../mlxfw/mlxfw.h"
43
/* A firmware "branch" is identified by the hundreds part of the minor
 * version number, e.g. minor 1703 belongs to branch 17.
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware version the Spectrum-1 driver was validated against. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 1703
#define MLXSW_SP1_FWREV_SUBMINOR 4
/* Oldest minor on this branch whose firmware supports a post-flash reset. */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file name requested from userspace, derived from the
 * required revision above (e.g. mellanox/mlxsw_spectrum-13.1703.4.mfa2).
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
66
/* Tx header field accessors. Each MLXSW_ITEM32() below generates getters/
 * setters for one field of the 16-byte Tx header prepended to every packet
 * sent towards the device (byte offset, bit shift, bit width).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
/* Glue between the generic mlxfw flashing code and a Spectrum instance.
 * The mlxfw callbacks receive the embedded mlxfw_dev and recover the
 * owning mlxsw_sp via container_of().
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
139
/* mlxfw callback: report a firmware component's maximum size, required
 * alignment and maximum per-write chunk, as queried via the MCQI register.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(sp_dev->mlxsw_sp->core, MLXSW_REG(mcqi),
			      mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Enforce a minimum alignment and clamp the write size to what the
	 * MCDA download path can carry in a single transaction.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
162
/* mlxfw callback: acquire the firmware update handle.
 *
 * The update FSM must be idle before a new flash can start; otherwise
 * return -EBUSY so the caller can retry later.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *sp = sp_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 state;
	int err;

	/* Read the current FSM state together with the handle to lock. */
	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &state);
	if (state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(sp->core, MLXSW_REG(mcc), mcc_pl);
}
186
/* mlxfw callback: announce the start of a component download, telling the
 * device which component follows and how large it is.
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
200
/* mlxfw callback: push one block of component data to the device at the
 * given offset, using the MCDA register.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcda),
			       mcda_pl);
}
213
/* mlxfw callback: ask the device to verify a fully downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
226
/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
238
/* mlxfw callback: report the current update FSM state and, if any, the
 * last error code (clamped to the known error range).
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 state;
	u8 err_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &err_code, &state);
	*fsm_state = state;
	/* Unknown device error codes are mapped to the generic maximum. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, err_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
262
/* mlxfw callback: abort an in-progress update. Best effort - the write
 * status is intentionally ignored on this teardown path.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
274
/* mlxfw callback: release the update handle taken by the lock callback.
 * Best effort - the write status is intentionally ignored on teardown.
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
287
/* Callback table handed to the common mlxfw flashing code; it drives the
 * firmware update state machine through these operations.
 */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};
299
mlxsw_sp_firmware_flash(struct mlxsw_sp * mlxsw_sp,const struct firmware * firmware)300 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
301 const struct firmware *firmware)
302 {
303 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
304 .mlxfw_dev = {
305 .ops = &mlxsw_sp_mlxfw_dev_ops,
306 .psid = mlxsw_sp->bus_info->psid,
307 .psid_size = strlen(mlxsw_sp->bus_info->psid),
308 },
309 .mlxsw_sp = mlxsw_sp
310 };
311 int err;
312
313 mlxsw_core_fw_flash_start(mlxsw_sp->core);
314 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
315 mlxsw_core_fw_flash_end(mlxsw_sp->core);
316
317 return err;
318 }
319
/* Validate that the running firmware is compatible with the driver and,
 * if not, flash the bundled firmware file.
 *
 * Returns 0 when the firmware is already suitable (or no requirement is
 * set), -EAGAIN after a successful flash when the current firmware can
 * perform a reset (caller is expected to re-init), or a negative errno
 * on failure.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor.subminor -> OK as-is. */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	/* _direct: skip the usermode helper fallback; the file must be
	 * present in the firmware search path.
	 */
	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
371
/* Read a flow counter's packet and byte values via the MGPC register.
 * Either output pointer may be NULL when that value is not needed.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;

	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);

	return 0;
}
390
mlxsw_sp_flow_counter_clear(struct mlxsw_sp * mlxsw_sp,unsigned int counter_index)391 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
392 unsigned int counter_index)
393 {
394 char mgpc_pl[MLXSW_REG_MGPC_LEN];
395
396 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
397 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
398 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
399 }
400
mlxsw_sp_flow_counter_alloc(struct mlxsw_sp * mlxsw_sp,unsigned int * p_counter_index)401 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
402 unsigned int *p_counter_index)
403 {
404 int err;
405
406 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
407 p_counter_index);
408 if (err)
409 return err;
410 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
411 if (err)
412 goto err_counter_clear;
413 return 0;
414
415 err_counter_clear:
416 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
417 *p_counter_index);
418 return err;
419 }
420
mlxsw_sp_flow_counter_free(struct mlxsw_sp * mlxsw_sp,unsigned int counter_index)421 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
422 unsigned int counter_index)
423 {
424 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
425 counter_index);
426 }
427
mlxsw_sp_txhdr_construct(struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)428 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
429 const struct mlxsw_tx_info *tx_info)
430 {
431 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
432
433 memset(txhdr, 0, MLXSW_TXHDR_LEN);
434
435 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
436 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
437 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
438 mlxsw_tx_hdr_swid_set(txhdr, 0);
439 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
440 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
441 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
442 }
443
/* Map a bridge STP port state to the corresponding SPMS register state.
 * Listening, disabled and blocking all map to discarding. Any other value
 * is a driver bug.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	if (state == BR_STATE_FORWARDING)
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	if (state == BR_STATE_LEARNING)
		return MLXSW_REG_SPMS_STATE_LEARNING;
	if (state == BR_STATE_LISTENING || state == BR_STATE_DISABLED ||
	    state == BR_STATE_BLOCKING)
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	BUG();
}
459
/* Program the STP state of (@mlxsw_sp_port, @vid) via the SPMS register.
 * The SPMS payload is too large for the stack, hence the heap allocation.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;
	char *pl;
	int err;

	pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_spms_pack(pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(pl, vid, spms_state);
	err = mlxsw_reg_write(sp->core, MLXSW_REG(spms), pl);

	kfree(pl);
	return err;
}
478
mlxsw_sp_base_mac_get(struct mlxsw_sp * mlxsw_sp)479 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
480 {
481 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
482 int err;
483
484 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
485 if (err)
486 return err;
487 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
488 return 0;
489 }
490
/* Enable/disable packet sampling on the port with the given rate,
 * via the MPSC register.
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	char pl[MLXSW_REG_MPSC_LEN];
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_mpsc_pack(pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(sp->core, MLXSW_REG(mpsc), pl);
}
500
/* Set the port's administrative state (up/down) via the PAOS register. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	enum mlxsw_port_admin_status status = is_up ?
		MLXSW_PORT_ADMIN_STATUS_UP : MLXSW_PORT_ADMIN_STATUS_DOWN;
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;
	char pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(pl, mlxsw_sp_port->local_port, status);
	return mlxsw_reg_write(sp->core, MLXSW_REG(paos), pl);
}
512
mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port * mlxsw_sp_port,unsigned char * addr)513 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
514 unsigned char *addr)
515 {
516 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
517 char ppad_pl[MLXSW_REG_PPAD_LEN];
518
519 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
520 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
521 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
522 }
523
mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port * mlxsw_sp_port)524 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
525 {
526 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
527 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
528
529 ether_addr_copy(addr, mlxsw_sp->base_mac);
530 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
531 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
532 }
533
/* Set the port MTU via the PMTU register, after validating it against the
 * port's hardware maximum. The hardware MTU also covers the Tx header and
 * the Ethernet header, so those are added to the requested value first.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;

	/* A zero MTU in the query form reads back the port maximum. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	if (mtu > mlxsw_reg_pmtu_max_mtu_get(pmtu_pl))
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
554
/* Assign the port to switch partition @swid via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	char pl[MLXSW_REG_PSPA_LEN];
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_pspa_pack(pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(sp->core, MLXSW_REG(pspa), pl);
}
563
/* Enable/disable Virtual Port mode on the port via the SVPE register. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	char pl[MLXSW_REG_SVPE_LEN];
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_svpe_pack(pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(sp->core, MLXSW_REG(svpe), pl);
}
572
/* Enable/disable FDB learning for (@mlxsw_sp_port, @vid) via the SPVMLR
 * register. The payload is too large for the stack, hence the allocation.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;
	char *pl;
	int err;

	pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	/* Single-VID range: begin == end == vid. */
	mlxsw_reg_spvmlr_pack(pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(sp->core, MLXSW_REG(spvmlr), pl);

	kfree(pl);
	return err;
}
589
/* Program the port's PVID in hardware via the SPVID register. */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	char pl[MLXSW_REG_SPVID_LEN];
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_spvid_pack(pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(sp->core, MLXSW_REG(spvid), pl);
}
599
/* Allow or forbid untagged frames on the port via the SPAFT register. */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	char pl[MLXSW_REG_SPAFT_LEN];
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_spaft_pack(pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(sp->core, MLXSW_REG(spaft), pl);
}
609
/* Set the port's PVID. A zero @vid means "no PVID": untagged frames are
 * then dropped rather than classified. On a non-zero @vid the hardware
 * PVID is programmed first and untagged traffic is allowed; if the latter
 * fails, the previous PVID is restored so hardware and mlxsw_sp_port->pvid
 * stay consistent.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the PVID that was in effect before this call. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
634
635 static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port * mlxsw_sp_port)636 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
637 {
638 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
639 char sspr_pl[MLXSW_REG_SSPR_LEN];
640
641 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
642 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
643 }
644
/* Query which module, lane width and first Tx lane are mapped to
 * @local_port, using the PMLP register. Module and lane are taken from
 * the first lane entry.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;

	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
661
/* Map the port to @width consecutive lanes of @module, starting at @lane,
 * via the PMLP register. The same lane index is used for Rx and Tx.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
678
mlxsw_sp_port_module_unmap(struct mlxsw_sp_port * mlxsw_sp_port)679 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
680 {
681 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
682 char pmlp_pl[MLXSW_REG_PMLP_LEN];
683
684 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
685 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
686 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
687 }
688
mlxsw_sp_port_open(struct net_device * dev)689 static int mlxsw_sp_port_open(struct net_device *dev)
690 {
691 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
692 int err;
693
694 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
695 if (err)
696 return err;
697 netif_start_queue(dev);
698 return 0;
699 }
700
mlxsw_sp_port_stop(struct net_device * dev)701 static int mlxsw_sp_port_stop(struct net_device *dev)
702 {
703 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
704
705 netif_stop_queue(dev);
706 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
707 }
708
/* ndo_start_xmit: prepend the Tx header and hand the skb to the core for
 * transmission towards the device.
 *
 * Returns NETDEV_TX_BUSY only when the core transmit path is momentarily
 * busy (skb not consumed); in every other case the skb is consumed here
 * and NETDEV_TX_OK is returned, with tx_dropped accounting on failure.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header; reallocate the skb if the existing
	 * headroom is insufficient. The original skb is released either way.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
765
/* ndo_set_rx_mode: intentionally empty - the callback must exist for the
 * netdev core, but no Rx-mode programming is done here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
769
mlxsw_sp_port_set_mac_address(struct net_device * dev,void * p)770 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
771 {
772 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
773 struct sockaddr *addr = p;
774 int err;
775
776 if (!is_valid_ether_addr(addr->sa_data))
777 return -EADDRNOTAVAIL;
778
779 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
780 if (err)
781 return err;
782 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
783 return 0;
784 }
785
/* Xoff threshold for a priority group buffer: two MTU-sized packets,
 * expressed in cells.
 */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
791
#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Delay buffer (in cells) needed for PFC: the peer-advertised @delay is
 * given in bit time and converted to bytes, scaled by the cell factor,
 * plus one MTU's worth of cells for the packet in flight.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
802
/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay buffer (in cells) for a priority group: PFC uses the advertised
 * delay, plain PAUSE uses the fixed worst-case constant above, and a
 * lossy configuration needs no delay buffer at all.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
818
/* Pack one priority group buffer entry into a PBMC payload: lossy buffers
 * carry only a size, lossless ones also carry the Xon/Xoff threshold.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (!lossy) {
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
		return;
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
}
828
/* Configure the port's headroom buffers (PBMC register) for the given MTU
 * and priority-to-TC mapping.
 *
 * For each priority group (TC) that has at least one priority mapped to
 * it, the buffer is sized as threshold + delay; a PG is lossless when PFC
 * is enabled on one of its priorities or global pause is on, lossy
 * otherwise. PGs with no mapped priority are left untouched.
 *
 * @prio_tc:  per-priority TC mapping (IEEE_8021QAZ_MAX_TCS entries)
 * @pause_en: global PAUSE enabled on the port
 * @my_pfc:   PFC configuration, or NULL when PFC is not in use
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read-modify-write: fetch the current buffer configuration first. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		bool lossy;

		/* Only configure PG i if some priority maps to it; PFC for
		 * the PG follows the first such priority's PFC bit.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
872
/* Convenience wrapper around __mlxsw_sp_port_headroom_set(): take the
 * prio-to-TC map and PFC configuration from the port's DCB state when DCB
 * (ETS) is configured, or fall back to an all-zero map and no PFC.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	struct ieee_pfc *my_pfc = NULL;
	u8 *prio_tc = def_prio_tc;

	if (mlxsw_sp_port->dcb.ets) {
		prio_tc = mlxsw_sp_port->dcb.ets->prio_tc;
		my_pfc = mlxsw_sp_port->dcb.pfc;
	}

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
887
/* ndo_change_mtu: update headroom buffers, mirror (SPAN) buffers and the
 * port MTU for the new value, in that order. On failure, earlier steps are
 * rolled back with the old dev->mtu so the configuration stays consistent.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* dev->mtu still holds the previous value at this point. */
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
912
/* Sum the driver-maintained per-CPU (CPU path) counters into @stats.
 *
 * The 64-bit counters are read under the u64_stats seqcount retry loop
 * so a concurrent writer cannot be observed mid-update. Always returns
 * 0; used as the IFLA_OFFLOAD_XSTATS_CPU_HIT getter.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			/* Retry if the writer touched the counters while
			 * they were being read.
			 */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
944
mlxsw_sp_port_has_offload_stats(const struct net_device * dev,int attr_id)945 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
946 {
947 switch (attr_id) {
948 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
949 return true;
950 }
951
952 return false;
953 }
954
mlxsw_sp_port_get_offload_stats(int attr_id,const struct net_device * dev,void * sp)955 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
956 void *sp)
957 {
958 switch (attr_id) {
959 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
960 return mlxsw_sp_port_get_sw_stats64(dev, sp);
961 }
962
963 return -EINVAL;
964 }
965
mlxsw_sp_port_get_stats_raw(struct net_device * dev,int grp,int prio,char * ppcnt_pl)966 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
967 int prio, char *ppcnt_pl)
968 {
969 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
970 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
971
972 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
973 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
974 }
975
mlxsw_sp_port_get_hw_stats(struct net_device * dev,struct rtnl_link_stats64 * stats)976 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
977 struct rtnl_link_stats64 *stats)
978 {
979 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
980 int err;
981
982 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
983 0, ppcnt_pl);
984 if (err)
985 goto out;
986
987 stats->tx_packets =
988 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
989 stats->rx_packets =
990 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
991 stats->tx_bytes =
992 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
993 stats->rx_bytes =
994 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
995 stats->multicast =
996 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
997
998 stats->rx_crc_errors =
999 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1000 stats->rx_frame_errors =
1001 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1002
1003 stats->rx_length_errors = (
1004 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1005 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1006 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1007
1008 stats->rx_errors = (stats->rx_crc_errors +
1009 stats->rx_frame_errors + stats->rx_length_errors);
1010
1011 out:
1012 return err;
1013 }
1014
/* Fill @xstats (ECN marks, per-TC WRED/backlog/tail-drop, per-priority
 * TX counters) from several PPCNT counter groups.
 *
 * Best-effort: a failed query for one group simply leaves the
 * corresponding xstats fields untouched and moves on to the next group.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	/* Per-traffic-class congestion and queue counters. */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority transmit counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
1056
update_stats_cache(struct work_struct * work)1057 static void update_stats_cache(struct work_struct *work)
1058 {
1059 struct mlxsw_sp_port *mlxsw_sp_port =
1060 container_of(work, struct mlxsw_sp_port,
1061 periodic_hw_stats.update_dw.work);
1062
1063 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1064 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
1065 * necessary when port goes down.
1066 */
1067 goto out;
1068
1069 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1070 &mlxsw_sp_port->periodic_hw_stats.stats);
1071 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1072 &mlxsw_sp_port->periodic_hw_stats.xstats);
1073
1074 out:
1075 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1076 MLXSW_HW_STATS_UPDATE_TIME);
1077 }
1078
1079 /* Return the stats from a cache that is updated periodically,
1080 * as this function might get called in an atomic context.
1081 */
1082 static void
mlxsw_sp_port_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)1083 mlxsw_sp_port_get_stats64(struct net_device *dev,
1084 struct rtnl_link_stats64 *stats)
1085 {
1086 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1087
1088 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1089 }
1090
/* Program VLAN membership for [vid_begin, vid_end] through the SPVM
 * register. NOTE(review): the payload is heap-allocated, presumably
 * because the SPVM payload is too large for the stack — confirm.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	char *pl;
	int err;

	pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(spvm),
			      pl);

	kfree(pl);
	return err;
}
1109
/* Set VLAN membership for the whole range [vid_begin, vid_end], issuing
 * one SPVM write per chunk of MLXSW_REG_SPVM_REC_MAX_COUNT VIDs.
 * Stops and returns the error of the first failing chunk.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid = vid_begin;

	while (vid <= vid_end) {
		u16 last = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			       vid_end);
		int err;

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, last,
					       is_member, untagged);
		if (err)
			return err;

		vid += MLXSW_REG_SPVM_REC_MAX_COUNT;
	}

	return 0;
}
1129
/* Drop the port's reference on every VLAN still on its list. Uses the
 * _safe iterator because the final put frees the entry and unlinks it.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1138
/* Create a {port, VID} entry: program the VLAN in hardware first, then
 * allocate and link the tracking structure with an initial refcount of 1.
 * On allocation failure the hardware VLAN is removed again.
 *
 * Returns the new entry or an ERR_PTR.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* VID 1 is the only VID configured as egress-untagged here. */
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware VLAN configured above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1167
/* Tear down a {port, VID} entry: unlink and free the tracking structure,
 * then remove the VLAN from hardware. The VID is captured before kfree()
 * since it is needed for the hardware removal afterwards.
 */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1178
1179 struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port * mlxsw_sp_port,u16 vid)1180 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1181 {
1182 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1183
1184 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1185 if (mlxsw_sp_port_vlan) {
1186 mlxsw_sp_port_vlan->ref_count++;
1187 return mlxsw_sp_port_vlan;
1188 }
1189
1190 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1191 }
1192
/* Drop a reference on the {port, VID} entry. On the last reference,
 * detach it from its current user — a bridge port if one is set,
 * otherwise the router if a FID is attached — and destroy it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1207
/* ndo_vlan_rx_add_vid callback: take a reference on the {port, VID}
 * entry, creating it on first use.
 */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan);
}
1221
/* ndo_vlan_rx_kill_vid callback: drop the reference taken in
 * mlxsw_sp_port_add_vid(). Unknown VIDs are silently ignored.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1241
/* ndo_get_phys_port_name: delegate naming to the core, keyed by the
 * port's local port number.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *port = netdev_priv(dev);

	return mlxsw_core_port_get_phys_port_name(port->mlxsw_sp->core,
						  port->local_port, name, len);
}
1251
1252 static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port * port,unsigned long cookie)1253 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1254 unsigned long cookie) {
1255 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1256
1257 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1258 if (mall_tc_entry->cookie == cookie)
1259 return mall_tc_entry;
1260
1261 return NULL;
1262 }
1263
1264 static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_port_mall_mirror_tc_entry * mirror,const struct tc_action * a,bool ingress)1265 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1266 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1267 const struct tc_action *a,
1268 bool ingress)
1269 {
1270 enum mlxsw_sp_span_type span_type;
1271 struct net_device *to_dev;
1272
1273 to_dev = tcf_mirred_dev(a);
1274 if (!to_dev) {
1275 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1276 return -EINVAL;
1277 }
1278
1279 mirror->ingress = ingress;
1280 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1281 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
1282 true, &mirror->span_id);
1283 }
1284
1285 static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_port_mall_mirror_tc_entry * mirror)1286 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1287 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1288 {
1289 enum mlxsw_sp_span_type span_type;
1290
1291 span_type = mirror->ingress ?
1292 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1293 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1294 span_type, true);
1295 }
1296
/* Install a matchall sample action.
 *
 * Only one active sampler per port is supported, and the rate must fit
 * the MPSC register. The psample group pointer and parameters are
 * published before hardware sampling is enabled, and unpublished again
 * if enabling fails.
 *
 * Returns 0, -EOPNOTSUPP, -EEXIST, or the hardware error.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the group and parameters before enabling sampling in
	 * hardware, so samples always find a valid group.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1331
/* Remove the matchall sample action: disable sampling in hardware
 * first, then clear the published psample group pointer. No-op on ports
 * without sampling support.
 */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
1341
/* Offload a matchall classifier with a single mirror or sample action.
 * Only singular actions on protocol "all" are supported; the entry is
 * tracked on the port's mall_tc_list keyed by the flow cookie.
 *
 * Fix: removed the unused local "LIST_HEAD(actions);" — it was never
 * referenced anywhere in the function.
 *
 * Returns 0, -EOPNOTSUPP, -ENOMEM, or the action-specific error.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	a = tcf_exts_first_action(f->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1389
mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_cls_matchall_offload * f)1390 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1391 struct tc_cls_matchall_offload *f)
1392 {
1393 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1394
1395 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1396 f->cookie);
1397 if (!mall_tc_entry) {
1398 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1399 return;
1400 }
1401 list_del(&mall_tc_entry->list);
1402
1403 switch (mall_tc_entry->type) {
1404 case MLXSW_SP_PORT_MALL_MIRROR:
1405 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1406 &mall_tc_entry->mirror);
1407 break;
1408 case MLXSW_SP_PORT_MALL_SAMPLE:
1409 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1410 break;
1411 default:
1412 WARN_ON(1);
1413 }
1414
1415 kfree(mall_tc_entry);
1416 }
1417
/* Dispatch a matchall offload command to the add/delete handlers. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	if (f->command == TC_CLSMATCHALL_REPLACE)
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);

	if (f->command == TC_CLSMATCHALL_DESTROY) {
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	}

	return -EOPNOTSUPP;
}
1433
/* Dispatch a flower offload command on @acl_block to the flower
 * implementation. Destroy-type commands cannot fail and return 0.
 */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1457
/* Shared block callback handling matchall; flower is accepted but left
 * to the flower block callback registered alongside this one.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	if (type == TC_SETUP_CLSFLOWER)
		return 0;

	if (type != TC_SETUP_CLSMATCHALL)
		return -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, type_data))
		return -EOPNOTSUPP;

	return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
					      ingress);
}
1478
mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,void * type_data,void * cb_priv)1479 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
1480 void *type_data,
1481 void *cb_priv)
1482 {
1483 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1484 cb_priv, true);
1485 }
1486
mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,void * type_data,void * cb_priv)1487 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
1488 void *type_data,
1489 void *cb_priv)
1490 {
1491 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1492 cb_priv, false);
1493 }
1494
mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,void * type_data,void * cb_priv)1495 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1496 void *type_data, void *cb_priv)
1497 {
1498 struct mlxsw_sp_acl_block *acl_block = cb_priv;
1499
1500 switch (type) {
1501 case TC_SETUP_CLSMATCHALL:
1502 return 0;
1503 case TC_SETUP_CLSFLOWER:
1504 if (mlxsw_sp_acl_block_disabled(acl_block))
1505 return -EOPNOTSUPP;
1506
1507 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1508 default:
1509 return -EOPNOTSUPP;
1510 }
1511 }
1512
/* Bind a shareable tcf_block's flower rules to this port.
 *
 * One ACL block (and one registered flower callback) is shared by all
 * ports bound to the same tcf_block; a per-callback refcount tracks the
 * binders. The first binder creates the ACL block and registers the
 * callback, later binders just reuse it.
 *
 * Returns 0 or a negative errno; on failure the refcount/registration
 * state is restored (note the err_cb_register label intentionally sits
 * inside the decref conditional so a failed first bind also destroys
 * the freshly created ACL block).
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		/* First binder: create the shared ACL block and register
		 * the flower callback with it as private data.
		 */
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Last binder gone: also unregister and destroy the ACL block. */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}
1560
/* Unbind this port from a tcf_block's shared flower ACL block; the last
 * unbinder also unregisters the callback and destroys the ACL block.
 * The ACL block is kept alive if the unbind itself failed.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	/* Clear the port's pointer before tearing down the binding. */
	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}
1588
/* Handle TC_SETUP_BLOCK for the port.
 *
 * Registers/unregisters two callbacks per block: the direction-specific
 * matchall callback chosen here, and the shared flower callback managed
 * by the flower bind/unbind helpers. On bind failure the matchall
 * registration is rolled back so the two stay paired.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	/* Only clsact ingress/egress binder types are supported. */
	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port, f->extack);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress,
							  f->extack);
		if (err) {
			/* Keep matchall and flower registration paired. */
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1629
mlxsw_sp_setup_tc(struct net_device * dev,enum tc_setup_type type,void * type_data)1630 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1631 void *type_data)
1632 {
1633 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1634
1635 switch (type) {
1636 case TC_SETUP_BLOCK:
1637 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1638 case TC_SETUP_QDISC_RED:
1639 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1640 case TC_SETUP_QDISC_PRIO:
1641 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1642 default:
1643 return -EOPNOTSUPP;
1644 }
1645 }
1646
1647
/* Toggle NETIF_F_HW_TC. Disabling is refused while offloaded flower or
 * matchall rules are still installed; otherwise the port's ACL blocks
 * have their disable count adjusted accordingly.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (enable) {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
		return 0;
	}

	if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
	    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
	    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
		netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
	mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	return 0;
}
1667
/* Per-feature toggle handler invoked by mlxsw_sp_handle_feature();
 * returns 0 on success or a negative errno.
 */
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1669
/* Apply one netdev feature bit: if @feature differs between
 * @wanted_features and the device's current features, call
 * @feature_handler and mirror the result into dev->features.
 * Returns 0 when nothing changed or on success, else the handler error.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	bool enable = !!(wanted_features & feature);
	int err;

	/* Nothing to do unless this feature bit is actually flipping. */
	if (!((wanted_features ^ dev->features) & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features callback; NETIF_F_HW_TC is the only toggleable
 * feature handled here.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
}
1702
/* Network device operations for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_setup_tc = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features = mlxsw_sp_set_features,
};
1719
mlxsw_sp_port_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * drvinfo)1720 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1721 struct ethtool_drvinfo *drvinfo)
1722 {
1723 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1724 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1725
1726 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
1727 sizeof(drvinfo->driver));
1728 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1729 sizeof(drvinfo->version));
1730 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1731 "%d.%d.%d",
1732 mlxsw_sp->bus_info->fw_rev.major,
1733 mlxsw_sp->bus_info->fw_rev.minor,
1734 mlxsw_sp->bus_info->fw_rev.subminor);
1735 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1736 sizeof(drvinfo->bus_info));
1737 }
1738
mlxsw_sp_port_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * pause)1739 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1740 struct ethtool_pauseparam *pause)
1741 {
1742 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1743
1744 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1745 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1746 }
1747
mlxsw_sp_port_pause_set(struct mlxsw_sp_port * mlxsw_sp_port,struct ethtool_pauseparam * pause)1748 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1749 struct ethtool_pauseparam *pause)
1750 {
1751 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1752
1753 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1754 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1755 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1756
1757 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1758 pfcc_pl);
1759 }
1760
/* ethtool set_pauseparam.
 *
 * Pause is mutually exclusive with PFC, and pause autonegotiation is
 * not supported. Headroom is reconfigured for the new pause state
 * before the PFCC write; if that write fails, headroom is restored from
 * the still-unchanged cached pause state. The cache is only updated on
 * full success.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom from the unchanged cached pause state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1800
/* Descriptor for one ethtool hardware statistic: the string exposed to
 * userspace and the PPCNT payload accessor that extracts its value.
 * NOTE(review): cells_bytes presumably flags counters the device reports
 * in buffer cells needing conversion to bytes — confirm at the use site.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};
1806
/* IEEE 802.3 counter group (PPCNT): ethtool name -> PPCNT field getter. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1887
/* RFC 2819 (RMON) counter group: frame-size distribution histograms. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
1933
/* Per-priority counter group; each name gets a "_<prio>" suffix appended
 * by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1970
/* Per-traffic-class counter group; each name gets a "_<tc>" suffix
 * appended by mlxsw_sp_port_get_tc_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		/* reported in cells; converted to bytes when read */
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of ethtool stats: one set of IEEE and RFC 2819 counters,
 * plus the per-priority set for each of the 8 priorities and the per-TC
 * set for each TC queue. Must match what get_strings/get_stats emit.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))
1991
/* Emit the per-priority stat names, suffixed with the priority number,
 * into the ethtool strings buffer and advance *p past them.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	u8 *str = *p;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(str, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		str += ETH_GSTRING_LEN;
	}

	*p = str;
}
2002
/* Emit the per-TC stat names, suffixed with the TC number, into the
 * ethtool strings buffer and advance *p past them.
 */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	u8 *str = *p;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(str, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		str += ETH_GSTRING_LEN;
	}

	*p = str;
}
2013
/* ethtool .get_strings callback.
 *
 * The emission order here (IEEE, RFC 2819, per-priority, per-TC) must
 * match both MLXSW_SP_PORT_ETHTOOL_STATS_LEN and the order in which
 * mlxsw_sp_port_get_stats() fills the values array.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		/* One set of per-priority strings for each priority. */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		/* One set of per-TC strings for each TC queue. */
		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}
2042
mlxsw_sp_port_set_phys_id(struct net_device * dev,enum ethtool_phys_id_state state)2043 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2044 enum ethtool_phys_id_state state)
2045 {
2046 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2047 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2048 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2049 bool active;
2050
2051 switch (state) {
2052 case ETHTOOL_ID_ACTIVE:
2053 active = true;
2054 break;
2055 case ETHTOOL_ID_INACTIVE:
2056 active = false;
2057 break;
2058 default:
2059 return -EOPNOTSUPP;
2060 }
2061
2062 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2063 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2064 }
2065
/* Map a PPCNT counter group to its descriptor table and length.
 *
 * Returns 0 and fills *p_hw_stats/*p_len on success, or -EOPNOTSUPP
 * (with a WARN) for an unhandled group.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		/* All callers pass known groups; a miss is a driver bug. */
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
2093
/* Read one PPCNT counter group for a given priority/TC and store the
 * values into data[] starting at data_index. Counters flagged as
 * cells_bytes are converted from buffer cells to bytes.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		/* Cell-based counter: convert to bytes for user space. */
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}
2117
/* ethtool .get_ethtool_stats callback.
 *
 * Fills data[] group by group; data_index tracks the running offset and
 * must advance in the same order get_strings() emits the names.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}
2147
mlxsw_sp_port_get_sset_count(struct net_device * dev,int sset)2148 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2149 {
2150 switch (sset) {
2151 case ETH_SS_STATS:
2152 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2153 default:
2154 return -EOPNOTSUPP;
2155 }
2156 }
2157
/* One mapping between PTYS Ethernet protocol bit(s) and an ethtool link
 * mode bit, with the associated speed.
 */
struct mlxsw_sp_port_link_mode {
	/* corresponding ethtool link mode bit index */
	enum ethtool_link_mode_bit_indices mask_ethtool;
	/* PTYS eth_proto bitmask covered by this entry */
	u32 mask;
	/* link speed in Mb/s (SPEED_* constant) */
	u32 speed;
};
2163
2164 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2165 {
2166 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2167 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2168 .speed = SPEED_100,
2169 },
2170 {
2171 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2172 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2173 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2174 .speed = SPEED_1000,
2175 },
2176 {
2177 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2178 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2179 .speed = SPEED_10000,
2180 },
2181 {
2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2183 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2184 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2185 .speed = SPEED_10000,
2186 },
2187 {
2188 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2189 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2190 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2191 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2192 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2193 .speed = SPEED_10000,
2194 },
2195 {
2196 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2197 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2198 .speed = SPEED_20000,
2199 },
2200 {
2201 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2202 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2203 .speed = SPEED_40000,
2204 },
2205 {
2206 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2207 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2208 .speed = SPEED_40000,
2209 },
2210 {
2211 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2212 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2213 .speed = SPEED_40000,
2214 },
2215 {
2216 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2217 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2218 .speed = SPEED_40000,
2219 },
2220 {
2221 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2222 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2223 .speed = SPEED_25000,
2224 },
2225 {
2226 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2227 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2228 .speed = SPEED_25000,
2229 },
2230 {
2231 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2232 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2233 .speed = SPEED_25000,
2234 },
2235 {
2236 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2237 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2238 .speed = SPEED_25000,
2239 },
2240 {
2241 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2242 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2243 .speed = SPEED_50000,
2244 },
2245 {
2246 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2247 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2248 .speed = SPEED_50000,
2249 },
2250 {
2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2252 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2253 .speed = SPEED_50000,
2254 },
2255 {
2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2257 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2258 .speed = SPEED_56000,
2259 },
2260 {
2261 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2262 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2263 .speed = SPEED_56000,
2264 },
2265 {
2266 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2267 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2268 .speed = SPEED_56000,
2269 },
2270 {
2271 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2272 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2273 .speed = SPEED_56000,
2274 },
2275 {
2276 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2277 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2278 .speed = SPEED_100000,
2279 },
2280 {
2281 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2282 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2283 .speed = SPEED_100000,
2284 },
2285 {
2286 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2287 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2288 .speed = SPEED_100000,
2289 },
2290 {
2291 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2292 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2293 .speed = SPEED_100000,
2294 },
2295 };
2296
2297 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2298
/* Set the supported port-type bits (FIBRE/Backplane) in the ethtool
 * link settings based on which PTYS protocol capabilities are present.
 */
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	/* Any fibre/DAC-facing protocol capability implies a FIBRE port. */
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	/* KR/KX protocols indicate a backplane-capable port. */
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
2318
/* Translate a PTYS protocol bitmask into ethtool link-mode bits. */
static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	const struct mlxsw_sp_port_link_mode *link_mode;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		link_mode = &mlxsw_sp_port_link_mode[i];
		if (ptys_eth_proto & link_mode->mask)
			__set_bit(link_mode->mask_ethtool, mode);
	}
}
2329
/* Derive speed and duplex from the operational PTYS protocol mask.
 * Both are reported as unknown when the carrier is down or no table
 * entry matches; the first matching entry wins.
 */
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	int i;

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (!(ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask))
			continue;
		cmd->base.speed = mlxsw_sp_port_link_mode[i].speed;
		cmd->base.duplex = DUPLEX_FULL;
		return;
	}
}
2351
/* Derive the ethtool connector type (PORT_*) from the operational PTYS
 * protocol mask. Checks are ordered: fibre first, then direct-attach
 * copper, then backplane; anything else is PORT_OTHER.
 */
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
2373
2374 static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings * cmd)2375 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2376 {
2377 u32 ptys_proto = 0;
2378 int i;
2379
2380 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2381 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2382 cmd->link_modes.advertising))
2383 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2384 }
2385 return ptys_proto;
2386 }
2387
/* Build the PTYS protocol mask of all link modes whose speed equals
 * @speed exactly.
 */
static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed == speed)
			proto |= mlxsw_sp_port_link_mode[i].mask;
	}

	return proto;
}
2399
/* Build the PTYS protocol mask of all link modes with speed at most
 * @upper_speed.
 */
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			proto |= mlxsw_sp_port_link_mode[i].mask;
	}

	return proto;
}
2411
/* Fill the "supported" link modes: pause/autoneg capabilities plus the
 * port types and speeds derived from the PTYS capability mask.
 */
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}
2422
/* Fill the "advertising" link modes from the PTYS admin mask; nothing
 * is advertised when autonegotiation is off.
 */
static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		mlxsw_sp_from_ptys_link(eth_proto_admin,
					cmd->link_modes.advertising);
	}
}
2432
2433 static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp,u8 autoneg_status,struct ethtool_link_ksettings * cmd)2434 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2435 struct ethtool_link_ksettings *cmd)
2436 {
2437 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2438 return;
2439
2440 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2441 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2442 }
2443
mlxsw_sp_port_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)2444 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2445 struct ethtool_link_ksettings *cmd)
2446 {
2447 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2448 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2449 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2450 char ptys_pl[MLXSW_REG_PTYS_LEN];
2451 u8 autoneg_status;
2452 bool autoneg;
2453 int err;
2454
2455 autoneg = mlxsw_sp_port->link.autoneg;
2456 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2457 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2458 if (err)
2459 return err;
2460 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin,
2461 ð_proto_oper);
2462
2463 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2464
2465 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2466
2467 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2468 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2469 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2470
2471 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2472 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2473 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2474 cmd);
2475
2476 return 0;
2477 }
2478
2479 static int
mlxsw_sp_port_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)2480 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2481 const struct ethtool_link_ksettings *cmd)
2482 {
2483 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2485 char ptys_pl[MLXSW_REG_PTYS_LEN];
2486 u32 eth_proto_cap, eth_proto_new;
2487 bool autoneg;
2488 int err;
2489
2490 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2491 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2492 if (err)
2493 return err;
2494 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL);
2495
2496 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2497 if (!autoneg && cmd->base.speed == SPEED_56000) {
2498 netdev_err(dev, "56G not supported with autoneg off\n");
2499 return -EINVAL;
2500 }
2501 eth_proto_new = autoneg ?
2502 mlxsw_sp_to_ptys_advert_link(cmd) :
2503 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2504
2505 eth_proto_new = eth_proto_new & eth_proto_cap;
2506 if (!eth_proto_new) {
2507 netdev_err(dev, "No supported speed requested\n");
2508 return -EINVAL;
2509 }
2510
2511 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2512 eth_proto_new, autoneg);
2513 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2514 if (err)
2515 return err;
2516
2517 mlxsw_sp_port->link.autoneg = autoneg;
2518
2519 if (!netif_running(dev))
2520 return 0;
2521
2522 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2523 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2524
2525 return 0;
2526 }
2527
/* ethtool .flash_device callback: load the named firmware file and
 * flash it to the device.
 *
 * RTNL is held on entry (ethtool ioctl path) and is dropped for the
 * duration of the slow firmware request/flash; dev_hold() keeps the
 * netdev alive while the lock is released.
 */
static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	dev_hold(dev);
	rtnl_unlock();

	/* flash->data carries the firmware file name requested by user. */
	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	rtnl_lock();
	dev_put(dev);
	return err;
}
2552
/* Module EEPROM I2C addresses: 0x50 maps the low 256-byte page, 0x51 the
 * upper area.
 */
#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

/* Read up to MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes from the port module's
 * EEPROM via the MCIA register.
 *
 * Reads never cross the 256-byte page boundary: a request spanning it is
 * truncated at offset 256, and callers are expected to issue a follow-up
 * read. The number of bytes actually read is returned in *p_read_size.
 */
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	/* Offsets past the first page are addressed relative to the high
	 * I2C address.
	 */
	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	/* Non-zero MCIA status means the module access failed. */
	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}
2598
/* Module EEPROM revision compliance values (byte 1 of the EEPROM);
 * values presumably follow the SFF management interface specs —
 * TODO(review): confirm against SFF-8436/SFF-8636.
 */
enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
};

/* Module identifier values (byte 0 of the EEPROM). */
enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
};

/* Byte offsets within the EEPROM header read by get_module_info(). */
enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};
2617
/* ethtool .get_module_info callback.
 *
 * Reads the first two EEPROM bytes (identifier and revision) and maps
 * them to an ETH_MODULE_SFF_* type and EEPROM length.
 */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
	u8 module_rev_id, module_id;
	unsigned int read_size;
	int err;

	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
					   MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
					   module_info, &read_size);
	if (err)
		return err;

	if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
		return -EIO;

	module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
	module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];

	switch (module_id) {
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
		/* QSFP28, and QSFP+ with a new enough revision, use the
		 * SFF-8636 layout; older QSFP+ modules use SFF-8436.
		 */
		if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
		    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2665
/* ethtool .get_module_eeprom callback.
 *
 * The hardware read helper may return fewer bytes than requested (it
 * caps at the register payload size and at page boundaries), so keep
 * issuing reads until the full requested length is gathered.
 */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	unsigned int bytes_read = 0;
	unsigned int read_size;
	int err;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (bytes_read < ee->len) {
		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port,
						   ee->offset + bytes_read,
						   ee->len - bytes_read,
						   data + bytes_read,
						   &read_size);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
			return err;
		}

		bytes_read += read_size;
	}

	return 0;
}
2696
/* ethtool operations exposed by Spectrum port netdevs. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};
2712
/* Set the port's admin protocol mask to every link mode whose speed fits
 * within the maximum allowed by the port width (base speed times number
 * of lanes).
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
2726
/* Configure an ETS scheduling element (QEEC register) on the port.
 *
 * @hr: hierarchy level of the element
 * @index: element index within the level
 * @next_index: parent element index in the next level
 * @dwrr: true for DWRR arbitration, false for strict priority
 * @dwrr_weight: DWRR weight (when @dwrr is set)
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* de=true: the dwrr/dwrr_weight fields below are to be applied. */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2741
/* Set the maximum shaper rate of an ETS scheduling element.
 *
 * @hr/@index/@next_index identify the element as in
 * mlxsw_sp_port_ets_set(); @maxrate is the shaper rate to program.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* mase=true: apply the max-shaper rate field below. */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2755
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	/* Program the minimum shaper of a scheduling element (MISE bit
	 * enables the min-shaper configuration in the QEEC register).
	 */
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qeec),
			       qeec_pl);
}
2770
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	/* Map one switch priority to a traffic class on this port. */
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qtct),
			       qtct_pl);
}
2781
mlxsw_sp_port_ets_init(struct mlxsw_sp_port * mlxsw_sp_port)2782 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2783 {
2784 int err, i;
2785
2786 /* Setup the elements hierarcy, so that each TC is linked to
2787 * one subgroup, which are all member in the same group.
2788 */
2789 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2790 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2791 0);
2792 if (err)
2793 return err;
2794 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2795 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2796 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2797 0, false, 0);
2798 if (err)
2799 return err;
2800 }
2801 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2802 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2803 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2804 false, 0);
2805 if (err)
2806 return err;
2807
2808 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2809 MLXSW_REG_QEEC_HIERARCY_TC,
2810 i + 8, i,
2811 true, 100);
2812 if (err)
2813 return err;
2814 }
2815
2816 /* Make sure the max shaper is disabled in all hierarchies that
2817 * support it.
2818 */
2819 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2820 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2821 MLXSW_REG_QEEC_MAS_DIS);
2822 if (err)
2823 return err;
2824 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2825 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2826 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2827 i, 0,
2828 MLXSW_REG_QEEC_MAS_DIS);
2829 if (err)
2830 return err;
2831 }
2832 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2833 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2834 MLXSW_REG_QEEC_HIERARCY_TC,
2835 i, i,
2836 MLXSW_REG_QEEC_MAS_DIS);
2837 if (err)
2838 return err;
2839
2840 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2841 MLXSW_REG_QEEC_HIERARCY_TC,
2842 i + 8, i,
2843 MLXSW_REG_QEEC_MAS_DIS);
2844 if (err)
2845 return err;
2846 }
2847
2848 /* Configure the min shaper for multicast TCs. */
2849 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2850 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
2851 MLXSW_REG_QEEC_HIERARCY_TC,
2852 i + 8, i,
2853 MLXSW_REG_QEEC_MIS_MIN);
2854 if (err)
2855 return err;
2856 }
2857
2858 /* Map all priorities to traffic class 0. */
2859 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2860 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2861 if (err)
2862 return err;
2863 }
2864
2865 return 0;
2866 }
2867
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	/* Enable or disable the port's multicast-aware traffic class
	 * mode via the QTCTM register.
	 */
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(qtctm), qtctm_pl);
}
2877
/* mlxsw_sp_port_create() - Instantiate one front-panel port.
 * @mlxsw_sp: switch instance.
 * @local_port: device-local port number to bind the netdev to.
 * @split: true if the port is a member of a split configuration.
 * @module: front-panel module backing the port.
 * @width: number of lanes the port uses.
 * @lane: first lane used within the module.
 *
 * Allocates the netdev and per-port state, maps the port to its module
 * lanes, configures speed/MTU/buffers/ETS/DCB/FIDs, creates the default
 * VLAN and finally registers the netdev. On failure each step is
 * unwound in reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	/* The driver's per-port context lives in the netdev's private area. */
	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = 1;	/* Default VLAN; created further below. */
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Packet sampling state; checked by the PKT_SAMPLE trap handler. */
	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	/* update_stats_cache is defined elsewhere in this file; the work is
	 * first scheduled after register_netdev() succeeds, see below.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* Carrier is driven by PUDE link events; start with it off. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Create the default VLAN (matches pvid = 1 set above). */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	/* Publish the port before registering the netdev so that notifier
	 * callbacks triggered by registration can look it up.
	 */
	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, module + 1,
				mlxsw_sp_port->split, lane / width);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

/* Error unwind: strictly the reverse of the initialization order above. */
err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
3091
/* Tear down a port created by mlxsw_sp_port_create(), unwinding the
 * initialization steps in reverse order. The ports[] slot is cleared
 * right after netdev unregistration so trap handlers stop seeing the
 * port before its resources are freed.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop the periodic stats work before the netdev goes away. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been released by the vlan_flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
3114
mlxsw_sp_port_created(struct mlxsw_sp * mlxsw_sp,u8 local_port)3115 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3116 {
3117 return mlxsw_sp->ports[local_port] != NULL;
3118 }
3119
mlxsw_sp_ports_remove(struct mlxsw_sp * mlxsw_sp)3120 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3121 {
3122 int i;
3123
3124 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3125 if (mlxsw_sp_port_created(mlxsw_sp, i))
3126 mlxsw_sp_port_remove(mlxsw_sp, i);
3127 kfree(mlxsw_sp->port_to_module);
3128 kfree(mlxsw_sp->ports);
3129 mlxsw_sp->ports = NULL;
3130 }
3131
mlxsw_sp_ports_create(struct mlxsw_sp * mlxsw_sp)3132 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3133 {
3134 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3135 u8 module, width, lane;
3136 size_t alloc_size;
3137 int i;
3138 int err;
3139
3140 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3141 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3142 if (!mlxsw_sp->ports)
3143 return -ENOMEM;
3144
3145 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
3146 GFP_KERNEL);
3147 if (!mlxsw_sp->port_to_module) {
3148 err = -ENOMEM;
3149 goto err_port_to_module_alloc;
3150 }
3151
3152 for (i = 1; i < max_ports; i++) {
3153 /* Mark as invalid */
3154 mlxsw_sp->port_to_module[i] = -1;
3155
3156 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3157 &width, &lane);
3158 if (err)
3159 goto err_port_module_info_get;
3160 if (!width)
3161 continue;
3162 mlxsw_sp->port_to_module[i] = module;
3163 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3164 module, width, lane);
3165 if (err)
3166 goto err_port_create;
3167 }
3168 return 0;
3169
3170 err_port_create:
3171 err_port_module_info_get:
3172 for (i--; i >= 1; i--)
3173 if (mlxsw_sp_port_created(mlxsw_sp, i))
3174 mlxsw_sp_port_remove(mlxsw_sp, i);
3175 kfree(mlxsw_sp->port_to_module);
3176 err_port_to_module_alloc:
3177 kfree(mlxsw_sp->ports);
3178 mlxsw_sp->ports = NULL;
3179 return err;
3180 }
3181
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	/* Return the first local port of the cluster containing
	 * @local_port. Local ports are numbered from 1, hence the
	 * -1 bias before the modulo.
	 */
	return local_port - (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
}
3188
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	/* Create @count split ports on consecutive local ports, each
	 * using an equal share of the module's lanes.
	 */
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int i, err;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	/* Remove the split ports created before the failure. */
	while (--i >= 0) {
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	}
	return err;
}
3210
/* Re-create the original (unsplit) ports after a split configuration is
 * undone. This is best-effort: a failure to re-create one port must not
 * prevent the others from being re-created, so errors are only logged.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int err;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;	/* No module was ever mapped here. */
		module = mlxsw_sp->port_to_module[local_port];

		err = mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
					   width, 0);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to re-create port\n",
				local_port);
	}
}
3232
3233 static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp * mlxsw_sp,u8 local_port)3234 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3235 {
3236 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
3237 return mlxsw_sp->ports[local_port];
3238 return NULL;
3239 }
3240
/* devlink port-split handler: split a full-width port into 2 or 4
 * narrower ports. Validates the request, removes the ports occupying
 * the cluster and re-creates them as split ports. Returns 0 or a
 * negative errno (with an extack message for user space).
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split further. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Remove the existing ports so their local ports can be reused. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
3308
/* devlink port-unsplit handler: undo a previous split, removing the
 * split ports and re-creating the original full-width port(s).
 * Returns 0 or a negative errno (with an extack message).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Infer the original split factor from the current width:
	 * width 1 means a split-by-4, otherwise split-by-2.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
3349
3350 static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port * mlxsw_sp_port)3351 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
3352 {
3353 int i;
3354
3355 for (i = 0; i < TC_MAX_QUEUE; i++)
3356 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
3357 }
3358
mlxsw_sp_pude_event_func(const struct mlxsw_reg_info * reg,char * pude_pl,void * priv)3359 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3360 char *pude_pl, void *priv)
3361 {
3362 struct mlxsw_sp *mlxsw_sp = priv;
3363 struct mlxsw_sp_port *mlxsw_sp_port;
3364 enum mlxsw_reg_pude_oper_status status;
3365 u8 local_port;
3366
3367 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3368 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3369 if (!mlxsw_sp_port)
3370 return;
3371
3372 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3373 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3374 netdev_info(mlxsw_sp_port->dev, "link up\n");
3375 netif_carrier_on(mlxsw_sp_port->dev);
3376 } else {
3377 netdev_info(mlxsw_sp_port->dev, "link down\n");
3378 netif_carrier_off(mlxsw_sp_port->dev);
3379 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
3380 }
3381 }
3382
/* Deliver a trapped packet to the stack via the port's netdev,
 * accounting it in the per-CPU RX statistics.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
3407
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	/* Same as the no-mark handler, but flag the skb as already
	 * forwarded in hardware.
	 */
	skb->offload_fwd_mark = 1;
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
3414
static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	/* Flag both multicast-route and L2 forwarding offload before
	 * handing the skb to the common delivery path.
	 */
	skb->offload_mr_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
3422
/* Handler for the PKT_SAMPLE trap: forward a sampled packet to the
 * psample infrastructure, honoring the port's configured truncation,
 * and consume the skb in all cases.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Report only the truncated length when truncation is enabled. */
	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; it may be cleared concurrently
	 * when sampling is disabled on the port.
	 */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
3457
/* Listener convenience wrappers. Each expands to a MLXSW_RXL() entry
 * with the SP_ prefix applied to the trap group and DISCARD as the
 * action used while the listener is unregistered.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but the handler sets skb->offload_fwd_mark. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but also sets skb->offload_mr_fwd_mark. */
#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listeners always use the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3472
/* Table of all event and packet-trap listeners registered by
 * mlxsw_sp_traps_init(). Grouped by the layer the trap belongs to.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
};
3545
/* Configure the CPU policers that rate-limit trapped traffic per trap
 * group. Policer IDs match the trap-group IDs they protect (see
 * mlxsw_sp_trap_groups_set()); groups not listed keep their defaults.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;	/* All rates below are packet-based. */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			/* Unused policer; leave the hardware default. */
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
3607
/* Assign each trap group its CPU-bound priority, traffic class and
 * policer. Policer IDs default to the group ID (matching the policers
 * programmed in mlxsw_sp_cpu_policers_set()); the event group is
 * exempt from policing. Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;	/* Policer ID tracks the group ID. */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events must never be policed. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Sanity: a policed group must reference a valid policer. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
3680
mlxsw_sp_traps_init(struct mlxsw_sp * mlxsw_sp)3681 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3682 {
3683 int i;
3684 int err;
3685
3686 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3687 if (err)
3688 return err;
3689
3690 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3691 if (err)
3692 return err;
3693
3694 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3695 err = mlxsw_core_trap_register(mlxsw_sp->core,
3696 &mlxsw_sp_listener[i],
3697 mlxsw_sp);
3698 if (err)
3699 goto err_listener_register;
3700
3701 }
3702 return 0;
3703
3704 err_listener_register:
3705 for (i--; i >= 0; i--) {
3706 mlxsw_core_trap_unregister(mlxsw_sp->core,
3707 &mlxsw_sp_listener[i],
3708 mlxsw_sp);
3709 }
3710 return err;
3711 }
3712
mlxsw_sp_traps_fini(struct mlxsw_sp * mlxsw_sp)3713 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3714 {
3715 int i;
3716
3717 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3718 mlxsw_core_trap_unregister(mlxsw_sp->core,
3719 &mlxsw_sp_listener[i],
3720 mlxsw_sp);
3721 }
3722 }
3723
mlxsw_sp_lag_init(struct mlxsw_sp * mlxsw_sp)3724 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3725 {
3726 char slcr_pl[MLXSW_REG_SLCR_LEN];
3727 int err;
3728
3729 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3730 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3731 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3732 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3733 MLXSW_REG_SLCR_LAG_HASH_SIP |
3734 MLXSW_REG_SLCR_LAG_HASH_DIP |
3735 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3736 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3737 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3738 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3739 if (err)
3740 return err;
3741
3742 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3743 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3744 return -EIO;
3745
3746 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3747 sizeof(struct mlxsw_sp_upper),
3748 GFP_KERNEL);
3749 if (!mlxsw_sp->lags)
3750 return -ENOMEM;
3751
3752 return 0;
3753 }
3754
/* Free the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3759
mlxsw_sp_basic_trap_groups_set(struct mlxsw_core * mlxsw_core)3760 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3761 {
3762 char htgt_pl[MLXSW_REG_HTGT_LEN];
3763
3764 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3765 MLXSW_REG_HTGT_INVALID_POLICER,
3766 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3767 MLXSW_REG_HTGT_DEFAULT_TC);
3768 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3769 }
3770
3771 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3772 unsigned long event, void *ptr);
3773
/* Common Spectrum initialization, shared by the SP1/SP2 init callbacks.
 * Subsystems are brought up in dependency order; on any failure the
 * already-initialized subsystems are torn down in reverse order via the
 * goto chain. mlxsw_sp_fini() must mirror this order exactly.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	/* Possibly flash newer firmware before touching the device further. */
	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last: once created they can immediately carry traffic
	 * and generate events that rely on everything initialized above.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
3915
mlxsw_sp1_init(struct mlxsw_core * mlxsw_core,const struct mlxsw_bus_info * mlxsw_bus_info)3916 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3917 const struct mlxsw_bus_info *mlxsw_bus_info)
3918 {
3919 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3920
3921 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
3922 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
3923 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3924 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3925 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3926 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3927 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3928
3929 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
3930 }
3931
mlxsw_sp2_init(struct mlxsw_core * mlxsw_core,const struct mlxsw_bus_info * mlxsw_bus_info)3932 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3933 const struct mlxsw_bus_info *mlxsw_bus_info)
3934 {
3935 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3936
3937 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3938 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3939 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3940 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3941 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3942
3943 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
3944 }
3945
/* Common teardown. Subsystems are dismantled in exactly the reverse order
 * of mlxsw_sp_init(); ports go first so no traffic or events arrive while
 * the layers beneath them are being removed.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
3965
/* Device configuration profile for Spectrum-1. Unlike the SP2 profile
 * below, SP1 explicitly partitions the KVD memory (used_kvd_sizes and the
 * hash single/double ratio plus the linear-region size).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* no InfiniBand multicast */
	.used_max_pkey			= 1,
	.max_pkey			= 0,	/* no InfiniBand pkeys */
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3991
/* Device configuration profile for Spectrum-2. Identical to the SP1
 * profile except that no KVD partitioning is configured.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* no InfiniBand multicast */
	.used_max_pkey			= 1,
	.max_pkey			= 0,	/* no InfiniBand pkeys */
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
4013
4014 static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core * mlxsw_core,struct devlink_resource_size_params * kvd_size_params,struct devlink_resource_size_params * linear_size_params,struct devlink_resource_size_params * hash_double_size_params,struct devlink_resource_size_params * hash_single_size_params)4015 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
4016 struct devlink_resource_size_params *kvd_size_params,
4017 struct devlink_resource_size_params *linear_size_params,
4018 struct devlink_resource_size_params *hash_double_size_params,
4019 struct devlink_resource_size_params *hash_single_size_params)
4020 {
4021 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4022 KVD_SINGLE_MIN_SIZE);
4023 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4024 KVD_DOUBLE_MIN_SIZE);
4025 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4026 u32 linear_size_min = 0;
4027
4028 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
4029 MLXSW_SP_KVD_GRANULARITY,
4030 DEVLINK_RESOURCE_UNIT_ENTRY);
4031 devlink_resource_size_params_init(linear_size_params, linear_size_min,
4032 kvd_size - single_size_min -
4033 double_size_min,
4034 MLXSW_SP_KVD_GRANULARITY,
4035 DEVLINK_RESOURCE_UNIT_ENTRY);
4036 devlink_resource_size_params_init(hash_double_size_params,
4037 double_size_min,
4038 kvd_size - single_size_min -
4039 linear_size_min,
4040 MLXSW_SP_KVD_GRANULARITY,
4041 DEVLINK_RESOURCE_UNIT_ENTRY);
4042 devlink_resource_size_params_init(hash_single_size_params,
4043 single_size_min,
4044 kvd_size - double_size_min -
4045 linear_size_min,
4046 MLXSW_SP_KVD_GRANULARITY,
4047 DEVLINK_RESOURCE_UNIT_ENTRY);
4048 }
4049
/* Register the SP1 KVD memory with devlink as a resource tree:
 * KVD (top) -> linear, hash-double, hash-single. The initial split follows
 * the profile's linear size and single/double parts ratio, rounded down to
 * the KVD granularity. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	/* Top-level KVD resource covering the whole memory. */
	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	/* KVDL sub-resources hang below the linear resource. */
	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) part by the double:single parts ratio,
	 * rounded down to the granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	/* Single-hash takes whatever is left. */
	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
4115
/* SP1 devlink resources callback; only the KVD tree is registered. */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}
4120
/* SP2 devlink resources callback; SP2 registers no resources. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}
4125
/* Resolve the KVD linear/double/single partition sizes to program into the
 * device. User-configured sizes are taken from devlink when present;
 * otherwise defaults are derived from the profile (see the comment below).
 * Returns 0 on success, -EIO if resources are missing or the resulting
 * split is invalid.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* Default: split the non-linear remainder by the profile's
		 * double:single parts ratio, rounded down to granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		/* Single-hash takes whatever is left. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
4180
/* Core driver callbacks for Spectrum-1 devices. Note kvd_sizes_get is
 * SP1-only, matching the KVD partitioning in mlxsw_sp1_config_profile.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};
4206
/* Core driver callbacks for Spectrum-2 devices. Same as the SP1 table
 * except for the SP2 init/resources callbacks and profile, and no
 * kvd_sizes_get (SP2 does not partition the KVD).
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};
4231
/* Return true if @dev is a mlxsw Spectrum port netdevice, identified by
 * its netdev_ops table.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4236
/* netdev lower-device walk callback: store the first Spectrum port found
 * in *data and return 1 to stop the walk; return 0 to keep walking.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_port = netdev_priv(lower_dev);
	return 1;
}
4249
mlxsw_sp_port_dev_lower_find(struct net_device * dev)4250 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4251 {
4252 struct mlxsw_sp_port *mlxsw_sp_port;
4253
4254 if (mlxsw_sp_port_dev_check(dev))
4255 return netdev_priv(dev);
4256
4257 mlxsw_sp_port = NULL;
4258 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
4259
4260 return mlxsw_sp_port;
4261 }
4262
mlxsw_sp_lower_get(struct net_device * dev)4263 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4264 {
4265 struct mlxsw_sp_port *mlxsw_sp_port;
4266
4267 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4268 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4269 }
4270
mlxsw_sp_port_dev_lower_find_rcu(struct net_device * dev)4271 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4272 {
4273 struct mlxsw_sp_port *mlxsw_sp_port;
4274
4275 if (mlxsw_sp_port_dev_check(dev))
4276 return netdev_priv(dev);
4277
4278 mlxsw_sp_port = NULL;
4279 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4280 &mlxsw_sp_port);
4281
4282 return mlxsw_sp_port;
4283 }
4284
/* Find the Spectrum port backing @dev and take a reference on its
 * netdevice. The lookup runs under rcu_read_lock() so the port cannot
 * disappear before dev_hold(). Callers release with
 * mlxsw_sp_port_dev_put(). Returns NULL if no Spectrum port backs @dev.
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
4296
/* Release the netdevice reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
4301
/* Make @mlxsw_sp_port leave every bridge that the LAG device, or any of
 * the LAG's direct upper devices, is a port of. Used when the port leaves
 * the LAG, since those bridge memberships no longer apply to it.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be a bridge port. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* So may any of its direct uppers (e.g. VLAN devices on the LAG). */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4320
/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];
	int err;

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
	return err;
}
4328
/* Destroy LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];
	int err;

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
	return err;
}
4336
/* Add the port to LAG @lag_id's collector at @port_index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(payload, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), payload);
}
4347
/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(payload, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), payload);
}
4358
/* Enable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(payload, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), payload);
}
4369
/* Disable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(payload, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), payload);
}
4380
/* Resolve @lag_dev to a hardware LAG index. If the device is already
 * mapped, return its existing index; otherwise return the first free one.
 * Returns -EBUSY if the device is unmapped and no free slot exists.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_idx = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count && lag->dev == lag_dev) {
			*p_lag_id = i;
			return 0;
		}
		if (!lag->ref_count && free_idx < 0)
			free_idx = i;
	}

	if (free_idx < 0)
		return -EBUSY;

	*p_lag_id = free_idx;
	return 0;
}
4407
4408 static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev,struct netdev_lag_upper_info * lag_upper_info,struct netlink_ext_ack * extack)4409 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4410 struct net_device *lag_dev,
4411 struct netdev_lag_upper_info *lag_upper_info,
4412 struct netlink_ext_ack *extack)
4413 {
4414 u16 lag_id;
4415
4416 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4417 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4418 return false;
4419 }
4420 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4421 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4422 return false;
4423 }
4424 return true;
4425 }
4426
/* Find the first free member slot in LAG @lag_id and return it through
 * @p_port_index. Returns -EBUSY when all member slots are occupied.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_members;
	int slot;

	max_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					 MAX_LAG_MEMBERS);
	for (slot = 0; slot < max_members; slot++) {
		if (mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, slot))
			continue;
		*p_port_index = slot;
		return 0;
	}

	return -EBUSY;
}
4443
/* Join @mlxsw_sp_port to the LAG backing @lag_dev: resolve (or create)
 * the hardware LAG, add the port to its collector, record the mapping in
 * core, and drop the port's router-interface role. Returns 0 or a
 * negative errno; a LAG created here is destroyed again on failure.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member creates the LAG in hardware. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_add:
	/* ref_count still 0 means we created the LAG above; undo that. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
4490
/* Remove @mlxsw_sp_port from the LAG backing @lag_dev, reversing
 * mlxsw_sp_port_lag_join(): leave the collector, flush the port's VLANs
 * and inherited bridge memberships, destroy the LAG if this was its last
 * member, and restore default VLAN 1 / PVID. No-op if the port is not
 * lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* Last member tears the LAG down in hardware. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
4524
/* Add the port to LAG @lag_id's distributor (SLDR register), so egress
 * traffic may be hashed to it.
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(payload, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), payload);
}
4535
/* Remove the port from LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char payload[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(payload, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), payload);
}
4546
4547 static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port * mlxsw_sp_port)4548 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4549 {
4550 int err;
4551
4552 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4553 mlxsw_sp_port->lag_id);
4554 if (err)
4555 return err;
4556
4557 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4558 if (err)
4559 goto err_dist_port_add;
4560
4561 return 0;
4562
4563 err_dist_port_add:
4564 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4565 return err;
4566 }
4567
4568 static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port * mlxsw_sp_port)4569 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4570 {
4571 int err;
4572
4573 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4574 mlxsw_sp_port->lag_id);
4575 if (err)
4576 return err;
4577
4578 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4579 mlxsw_sp_port->lag_id);
4580 if (err)
4581 goto err_col_port_disable;
4582
4583 return 0;
4584
4585 err_col_port_disable:
4586 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4587 return err;
4588 }
4589
mlxsw_sp_port_lag_changed(struct mlxsw_sp_port * mlxsw_sp_port,struct netdev_lag_lower_state_info * info)4590 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4591 struct netdev_lag_lower_state_info *info)
4592 {
4593 if (info->tx_enabled)
4594 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4595 else
4596 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4597 }
4598
/* Set the STP state of all VLANs on the port to either forwarding or
 * discarding, via a single SPMS register write.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state state = enable ?
					  MLXSW_REG_SPMS_STATE_FORWARDING :
					  MLXSW_REG_SPMS_STATE_DISCARDING;
	char *spms_pl;
	int err;
	u16 vid;

	/* SPMS payload is too large for the stack - allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;

	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
4623
/* Prepare a port for enslavement to an OVS master: switch the port to
 * virtual-port mode, set all VLANs to STP forwarding, add the port to
 * VLANs 2..4094 and disable learning on VIDs 1..4094. On failure each
 * step is unwound in reverse order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	/* Membership starts at VID 2; VID 1 is handled separately by the
	 * driver (see the PRECHANGEUPPER veto of VID 1 VLAN uppers).
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs disabled above. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4658
/* Undo mlxsw_sp_port_ovs_join(): re-enable learning, remove the port
 * from VLANs 2..4094, set STP to discarding and leave virtual-port
 * mode. Teardown is best-effort and mirrors the join in reverse order.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
4672
/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a port netdev.
 * PRECHANGEUPPER vetoes unsupported topologies (with an extack message)
 * before the core commits them; CHANGEUPPER reflects the committed
 * change into the device: bridge / LAG / OVS join and leave, macvlan
 * and VLAN-upper cleanup.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port netdev.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* The remaining checks only apply when enslaving. */
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (is_vlan_dev(upper_dev) &&
		    vlan_dev_vlan_id(upper_dev) == 1) {
			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				/* Stop distribution before leaving the LAG. */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only the unlinking of a bridged VLAN upper needs
			 * handling here; other VLAN transitions are handled
			 * by the VLAN device notifier path.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4786
mlxsw_sp_netdevice_port_lower_event(struct net_device * dev,unsigned long event,void * ptr)4787 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4788 unsigned long event, void *ptr)
4789 {
4790 struct netdev_notifier_changelowerstate_info *info;
4791 struct mlxsw_sp_port *mlxsw_sp_port;
4792 int err;
4793
4794 mlxsw_sp_port = netdev_priv(dev);
4795 info = ptr;
4796
4797 switch (event) {
4798 case NETDEV_CHANGELOWERSTATE:
4799 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4800 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4801 info->lower_state_info);
4802 if (err)
4803 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4804 }
4805 break;
4806 }
4807
4808 return 0;
4809 }
4810
mlxsw_sp_netdevice_port_event(struct net_device * lower_dev,struct net_device * port_dev,unsigned long event,void * ptr)4811 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4812 struct net_device *port_dev,
4813 unsigned long event, void *ptr)
4814 {
4815 switch (event) {
4816 case NETDEV_PRECHANGEUPPER:
4817 case NETDEV_CHANGEUPPER:
4818 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4819 event, ptr);
4820 case NETDEV_CHANGELOWERSTATE:
4821 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4822 ptr);
4823 }
4824
4825 return 0;
4826 }
4827
mlxsw_sp_netdevice_lag_event(struct net_device * lag_dev,unsigned long event,void * ptr)4828 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4829 unsigned long event, void *ptr)
4830 {
4831 struct net_device *dev;
4832 struct list_head *iter;
4833 int ret;
4834
4835 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4836 if (mlxsw_sp_port_dev_check(dev)) {
4837 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4838 ptr);
4839 if (ret)
4840 return ret;
4841 }
4842 }
4843
4844 return 0;
4845 }
4846
/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device whose real
 * device is a port. Only bridge and macvlan uppers are supported on a
 * VLAN upper; PRECHANGEUPPER vetoes everything else, CHANGEUPPER
 * applies bridge join/leave and macvlan cleanup.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Remaining checks only apply when enslaving. */
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been rejected at PRECHANGEUPPER. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4908
/* Propagate an event on a VLAN-over-LAG device to each mlxsw port
 * member of the LAG, stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, lower_dev, iter) {
		if (!mlxsw_sp_port_dev_check(lower_dev))
			continue;
		err = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, lower_dev,
							 event, ptr, vid);
		if (err)
			return err;
	}

	return 0;
}
4930
mlxsw_sp_netdevice_vlan_event(struct net_device * vlan_dev,unsigned long event,void * ptr)4931 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4932 unsigned long event, void *ptr)
4933 {
4934 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4935 u16 vid = vlan_dev_vlan_id(vlan_dev);
4936
4937 if (mlxsw_sp_port_dev_check(real_dev))
4938 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4939 event, ptr, vid);
4940 else if (netif_is_lag_master(real_dev))
4941 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4942 real_dev, event,
4943 ptr, vid);
4944
4945 return 0;
4946 }
4947
/* Handle PRECHANGEUPPER / CHANGEUPPER for a bridge netdev backed by
 * this driver. Only VLAN and macvlan uppers are allowed on the bridge;
 * on unlinking, the associated router interface state is cleaned up.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges with no mlxsw lower devices. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only unlinking requires cleanup here. */
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4989
mlxsw_sp_netdevice_macvlan_event(struct net_device * macvlan_dev,unsigned long event,void * ptr)4990 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4991 unsigned long event, void *ptr)
4992 {
4993 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4994 struct netdev_notifier_changeupper_info *info = ptr;
4995 struct netlink_ext_ack *extack;
4996
4997 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4998 return 0;
4999
5000 extack = netdev_notifier_info_to_extack(&info->info);
5001
5002 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
5003 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5004
5005 return -EOPNOTSUPP;
5006 }
5007
mlxsw_sp_is_vrf_event(unsigned long event,void * ptr)5008 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
5009 {
5010 struct netdev_notifier_changeupper_info *info = ptr;
5011
5012 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
5013 return false;
5014 return netif_is_l3_master(info->upper_dev);
5015 }
5016
/* Main netdevice notifier callback. Invalidates any mirror (SPAN)
 * entry bound to an unregistering netdev, re-resolves SPAN bindings,
 * and then dispatches the event to the handler matching the netdev
 * type. Note the dispatch order is significant: IP-in-IP and VRF
 * checks must precede the generic port/LAG/VLAN/bridge checks.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
5056
/* IPv4/IPv6 address (and address-validation) notifier blocks, registered
 * unconditionally in mlxsw_sp_module_init() and unregistered in reverse
 * order in mlxsw_sp_module_exit().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};
5072
/* PCI ID tables and PCI drivers for Spectrum-1 and Spectrum-2 ASICs;
 * registered in mlxsw_sp_module_init() via mlxsw_pci_driver_register().
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
5092
/* Module entry point: registers the address notifiers, the Spectrum-1/2
 * core drivers and their PCI drivers. On any failure, everything
 * registered so far is unwound in reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	return 0;

err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
5133
/* Module exit: tear everything down in the exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
5145
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: license, author, PCI binding tables and the
 * Spectrum-1 firmware image requested at probe time.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
5155