1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
61 #include <net/addrconf.h>
62
63 #include "spectrum.h"
64 #include "pci.h"
65 #include "core.h"
66 #include "reg.h"
67 #include "port.h"
68 #include "trap.h"
69 #include "txheader.h"
70 #include "spectrum_cnt.h"
71 #include "spectrum_dpipe.h"
72 #include "../mlxfw/mlxfw.h"
73
/* Minimum firmware revision supported by this driver. Devices running an
 * older revision are automatically upgraded from MLXSW_SP_FW_FILENAME
 * (see mlxsw_sp_fw_rev_validate()).
 */
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

/* Firmware image file name, derived from the supported revision, e.g.
 * "mellanox/mlxsw_spectrum-13.1420.122.mfa2".
 */
#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
91
/* Tx header field definitions. Each MLXSW_ITEM32(tx, hdr, <field>, offset,
 * shift, width) generates getter/setter helpers for one field of the Tx
 * header that is pushed in front of every transmitted frame (see
 * mlxsw_sp_txhdr_construct()).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159
/* Glue between the generic mlxfw flashing code and a Spectrum device. The
 * mlxfw_dev member is embedded so the callbacks below can recover the
 * wrapper with container_of().
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
164
/* mlxfw callback: report size/alignment constraints of a firmware component.
 * Returns 0 on success or a negative errno from the register access.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	/* Query the component attributes through the MCQI register. */
	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(sp_dev->mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Enforce at least 4-byte alignment and cap the write size by what
	 * a single MCDA transaction can carry.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
187
/* mlxfw callback: acquire the firmware update handle. Fails with -EBUSY
 * when the flash FSM is not idle (another update in flight).
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = sp_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 fsm_state;
	int err;

	/* Read back the current update handle and FSM state. */
	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &fsm_state);
	if (fsm_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	/* Take ownership of the handle we just read. */
	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
211
/* mlxfw callback: announce the component about to be downloaded along with
 * its total size.
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
225
/* mlxfw callback: push one chunk of firmware data at the given offset via
 * the MCDA register.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcda),
			       mcda_pl);
}
238
/* mlxfw callback: ask the device to verify the downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
251
/* mlxfw callback: request activation of the freshly flashed image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
263
/* mlxfw callback: fetch the current FSM state and the last error code for
 * the given update handle.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	/* Clamp device error codes we do not know to the generic maximum. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
287
/* mlxfw callback: abort an in-progress update. Best effort - the write
 * result is intentionally ignored since we are already on an error path.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
299
/* mlxfw callback: release the update handle taken in mlxsw_sp_fsm_lock().
 * Best effort - the write result is intentionally ignored.
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *sp_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(sp_dev->mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
312
/* mlxfw operations implementing the firmware flash state machine on top of
 * the MCQI/MCC/MCDA registers.
 */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};
324
mlxsw_sp_firmware_flash(struct mlxsw_sp * mlxsw_sp,const struct firmware * firmware)325 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
326 const struct firmware *firmware)
327 {
328 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
329 .mlxfw_dev = {
330 .ops = &mlxsw_sp_mlxfw_dev_ops,
331 .psid = mlxsw_sp->bus_info->psid,
332 .psid_size = strlen(mlxsw_sp->bus_info->psid),
333 },
334 .mlxsw_sp = mlxsw_sp
335 };
336 int err;
337
338 mlxsw_core_fw_flash_start(mlxsw_sp->core);
339 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
340 mlxsw_core_fw_flash_end(mlxsw_sp->core);
341
342 return err;
343 }
344
mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev * a,const struct mlxsw_fw_rev * b)345 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
346 const struct mlxsw_fw_rev *b)
347 {
348 if (a->major != b->major)
349 return a->major > b->major;
350 if (a->minor != b->minor)
351 return a->minor > b->minor;
352 return a->subminor >= b->subminor;
353 }
354
mlxsw_sp_fw_rev_validate(struct mlxsw_sp * mlxsw_sp)355 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
356 {
357 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
358 const struct firmware *firmware;
359 int err;
360
361 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
362 return 0;
363
364 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
365 rev->major, rev->minor, rev->subminor);
366 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
367 MLXSW_SP_FW_FILENAME);
368
369 err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
370 mlxsw_sp->bus_info->dev);
371 if (err) {
372 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
373 MLXSW_SP_FW_FILENAME);
374 return err;
375 }
376
377 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
378 release_firmware(firmware);
379 return err;
380 }
381
/* Read the packet/byte pair of flow counter @counter_index without
 * modifying it. Either output pointer may be NULL if the caller does not
 * need that value. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	/* NOP opcode: query only, leave the counter values untouched. */
	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;

	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);

	return 0;
}
400
mlxsw_sp_flow_counter_clear(struct mlxsw_sp * mlxsw_sp,unsigned int counter_index)401 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
402 unsigned int counter_index)
403 {
404 char mgpc_pl[MLXSW_REG_MGPC_LEN];
405
406 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
407 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
408 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
409 }
410
mlxsw_sp_flow_counter_alloc(struct mlxsw_sp * mlxsw_sp,unsigned int * p_counter_index)411 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
412 unsigned int *p_counter_index)
413 {
414 int err;
415
416 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
417 p_counter_index);
418 if (err)
419 return err;
420 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
421 if (err)
422 goto err_counter_clear;
423 return 0;
424
425 err_counter_clear:
426 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
427 *p_counter_index);
428 return err;
429 }
430
mlxsw_sp_flow_counter_free(struct mlxsw_sp * mlxsw_sp,unsigned int counter_index)431 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
432 unsigned int counter_index)
433 {
434 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
435 counter_index);
436 }
437
mlxsw_sp_txhdr_construct(struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)438 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
439 const struct mlxsw_tx_info *tx_info)
440 {
441 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
442
443 memset(txhdr, 0, MLXSW_TXHDR_LEN);
444
445 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
446 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
447 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
448 mlxsw_tx_hdr_swid_set(txhdr, 0);
449 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
450 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
451 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
452 }
453
/* Program the STP state of VLAN @vid on @mlxsw_sp_port. @state is one of
 * the bridge BR_STATE_* values; listening/disabled/blocking all map to
 * hardware discarding. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	/* SPMS is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;

	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);

	kfree(spms_pl);
	return err;
}
488
mlxsw_sp_base_mac_get(struct mlxsw_sp * mlxsw_sp)489 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
490 {
491 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
492 int err;
493
494 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
495 if (err)
496 return err;
497 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
498 return 0;
499 }
500
mlxsw_sp_span_init(struct mlxsw_sp * mlxsw_sp)501 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
502 {
503 int i;
504
505 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
506 return -EIO;
507
508 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
509 MAX_SPAN);
510 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
511 sizeof(struct mlxsw_sp_span_entry),
512 GFP_KERNEL);
513 if (!mlxsw_sp->span.entries)
514 return -ENOMEM;
515
516 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
517 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
518
519 return 0;
520 }
521
mlxsw_sp_span_fini(struct mlxsw_sp * mlxsw_sp)522 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
523 {
524 int i;
525
526 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
527 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
528
529 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
530 }
531 kfree(mlxsw_sp->span.entries);
532 }
533
534 static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port * port)535 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
536 {
537 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
538 struct mlxsw_sp_span_entry *span_entry;
539 char mpat_pl[MLXSW_REG_MPAT_LEN];
540 u8 local_port = port->local_port;
541 int index;
542 int i;
543 int err;
544
545 /* find a free entry to use */
546 index = -1;
547 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
548 if (!mlxsw_sp->span.entries[i].used) {
549 index = i;
550 span_entry = &mlxsw_sp->span.entries[i];
551 break;
552 }
553 }
554 if (index < 0)
555 return NULL;
556
557 /* create a new port analayzer entry for local_port */
558 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
559 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
560 if (err)
561 return NULL;
562
563 span_entry->used = true;
564 span_entry->id = index;
565 span_entry->ref_count = 1;
566 span_entry->local_port = local_port;
567 return span_entry;
568 }
569
mlxsw_sp_span_entry_destroy(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_span_entry * span_entry)570 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
571 struct mlxsw_sp_span_entry *span_entry)
572 {
573 u8 local_port = span_entry->local_port;
574 char mpat_pl[MLXSW_REG_MPAT_LEN];
575 int pa_id = span_entry->id;
576
577 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
578 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
579 span_entry->used = false;
580 }
581
582 static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp * mlxsw_sp,u8 local_port)583 mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
584 {
585 int i;
586
587 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
588 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
589
590 if (curr->used && curr->local_port == local_port)
591 return curr;
592 }
593 return NULL;
594 }
595
596 static struct mlxsw_sp_span_entry
mlxsw_sp_span_entry_get(struct mlxsw_sp_port * port)597 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
598 {
599 struct mlxsw_sp_span_entry *span_entry;
600
601 span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
602 port->local_port);
603 if (span_entry) {
604 /* Already exists, just take a reference */
605 span_entry->ref_count++;
606 return span_entry;
607 }
608
609 return mlxsw_sp_span_entry_create(port);
610 }
611
mlxsw_sp_span_entry_put(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_span_entry * span_entry)612 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
613 struct mlxsw_sp_span_entry *span_entry)
614 {
615 WARN_ON(!span_entry->ref_count);
616 if (--span_entry->ref_count == 0)
617 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
618 return 0;
619 }
620
mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port * port)621 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
622 {
623 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
624 struct mlxsw_sp_span_inspected_port *p;
625 int i;
626
627 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
628 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
629
630 list_for_each_entry(p, &curr->bound_ports_list, list)
631 if (p->local_port == port->local_port &&
632 p->type == MLXSW_SP_SPAN_EGRESS)
633 return true;
634 }
635
636 return false;
637 }
638
/* Size (in cells) of the shared buffer needed to egress-mirror a port
 * with the given MTU: 2.5 times the MTU converted to cells, plus one.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
644
/* React to an MTU change on @port: if the port is an egress mirror
 * source, resize its SBIB shared buffer to match the new MTU. Returns 0
 * on success or a negative errno.
 */
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	int err;

	/* Ports that are not egress-mirrored have no SBIB buffer to
	 * resize.
	 */
	if (!mlxsw_sp_span_is_egress_mirror(port))
		return 0;

	buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
	mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	if (err) {
		netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
		return err;
	}

	return 0;
}
667
668 static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port * port,struct mlxsw_sp_span_entry * span_entry)669 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
670 struct mlxsw_sp_span_entry *span_entry)
671 {
672 struct mlxsw_sp_span_inspected_port *p;
673
674 list_for_each_entry(p, &span_entry->bound_ports_list, list)
675 if (port->local_port == p->local_port)
676 return p;
677 return NULL;
678 }
679
680 static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port * port,struct mlxsw_sp_span_entry * span_entry,enum mlxsw_sp_span_type type)681 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
682 struct mlxsw_sp_span_entry *span_entry,
683 enum mlxsw_sp_span_type type)
684 {
685 struct mlxsw_sp_span_inspected_port *inspected_port;
686 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
687 char mpar_pl[MLXSW_REG_MPAR_LEN];
688 char sbib_pl[MLXSW_REG_SBIB_LEN];
689 int pa_id = span_entry->id;
690 int err;
691
692 /* if it is an egress SPAN, bind a shared buffer to it */
693 if (type == MLXSW_SP_SPAN_EGRESS) {
694 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
695 port->dev->mtu);
696
697 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
698 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
699 if (err) {
700 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
701 return err;
702 }
703 }
704
705 /* bind the port to the SPAN entry */
706 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
707 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
708 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
709 if (err)
710 goto err_mpar_reg_write;
711
712 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
713 if (!inspected_port) {
714 err = -ENOMEM;
715 goto err_inspected_port_alloc;
716 }
717 inspected_port->local_port = port->local_port;
718 inspected_port->type = type;
719 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
720
721 return 0;
722
723 err_mpar_reg_write:
724 err_inspected_port_alloc:
725 if (type == MLXSW_SP_SPAN_EGRESS) {
726 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
727 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
728 }
729 return err;
730 }
731
732 static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port * port,struct mlxsw_sp_span_entry * span_entry,enum mlxsw_sp_span_type type)733 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
734 struct mlxsw_sp_span_entry *span_entry,
735 enum mlxsw_sp_span_type type)
736 {
737 struct mlxsw_sp_span_inspected_port *inspected_port;
738 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
739 char mpar_pl[MLXSW_REG_MPAR_LEN];
740 char sbib_pl[MLXSW_REG_SBIB_LEN];
741 int pa_id = span_entry->id;
742
743 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
744 if (!inspected_port)
745 return;
746
747 /* remove the inspected port */
748 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
749 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
750 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
751
752 /* remove the SBIB buffer if it was egress SPAN */
753 if (type == MLXSW_SP_SPAN_EGRESS) {
754 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
755 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
756 }
757
758 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
759
760 list_del(&inspected_port->list);
761 kfree(inspected_port);
762 }
763
mlxsw_sp_span_mirror_add(struct mlxsw_sp_port * from,struct mlxsw_sp_port * to,enum mlxsw_sp_span_type type)764 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
765 struct mlxsw_sp_port *to,
766 enum mlxsw_sp_span_type type)
767 {
768 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
769 struct mlxsw_sp_span_entry *span_entry;
770 int err;
771
772 span_entry = mlxsw_sp_span_entry_get(to);
773 if (!span_entry)
774 return -ENOENT;
775
776 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
777 span_entry->id);
778
779 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
780 if (err)
781 goto err_port_bind;
782
783 return 0;
784
785 err_port_bind:
786 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
787 return err;
788 }
789
/* Stop mirroring traffic of type @type from port @from toward
 * @destination_port. Logs an error and returns if no matching SPAN entry
 * exists.
 */
static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					u8 destination_port,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry =
		mlxsw_sp_span_entry_find(from->mlxsw_sp, destination_port);

	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
807
/* Enable or disable packet sampling on @mlxsw_sp_port with the given
 * rate, via the MPSC register.
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(mpsc),
			       mpsc_pl);
}
817
/* Set the administrative (up/down) status of @mlxsw_sp_port via PAOS. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	if (is_up)
		mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
				    MLXSW_PORT_ADMIN_STATUS_UP);
	else
		mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
				    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
829
mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port * mlxsw_sp_port,unsigned char * addr)830 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
831 unsigned char *addr)
832 {
833 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
834 char ppad_pl[MLXSW_REG_PPAD_LEN];
835
836 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
837 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
838 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
839 }
840
mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port * mlxsw_sp_port)841 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
842 {
843 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
844 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
845
846 ether_addr_copy(addr, mlxsw_sp->base_mac);
847 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
848 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
849 }
850
/* Program the port MTU, accounting for the Tx header and Ethernet header
 * overhead. Queries the device for the maximum supported MTU first and
 * rejects larger values with -EINVAL.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int dev_max_mtu;
	int err;

	/* The wire MTU includes the Tx header and the Ethernet header. */
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;

	/* An MTU of 0 queries the device's maximum. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	dev_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	if (mtu > dev_max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
871
/* Assign @mlxsw_sp_port to switch partition @swid via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pspa),
			       pspa_pl);
}
880
/* Enable or disable virtual-port (SVPE) mode on @mlxsw_sp_port. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(svpe),
			       svpe_pl);
}
889
/* Enable or disable MAC learning on the port for the given VID.
 * The SPVMLR payload is heap-allocated; freed before returning.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;

	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			      MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
906
/* Program the port's PVID in hardware (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(spvid),
			       spvid_pl);
}
916
/* Allow or disallow admittance of untagged frames on the port. */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(spaft),
			       spaft_pl);
}
926
/* Set the port's PVID. A VID of zero only disables admittance of
 * untagged frames; otherwise the new PVID is programmed and untagged
 * admittance is enabled. The cached PVID is restored on failure.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		/* No PVID to program; just block untagged traffic. */
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	/* Cache the new PVID only after hardware accepted it. */
	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll the hardware PVID back to the previously cached value. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
951
952 static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port * mlxsw_sp_port)953 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
954 {
955 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
956 char sspr_pl[MLXSW_REG_SSPR_LEN];
957
958 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
959 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
960 }
961
/* Query the module, lane width and first Tx lane backing @local_port. */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;

	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
978
/* Map the port to @width consecutive lanes of @module starting at
 * @lane.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int idx;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (idx = 0; idx < width; idx++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, idx, module);
		/* The same lane is used for both Rx and Tx. */
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, idx, lane + idx);
	}

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pmlp),
			       pmlp_pl);
}
995
mlxsw_sp_port_module_unmap(struct mlxsw_sp_port * mlxsw_sp_port)996 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
997 {
998 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
999 char pmlp_pl[MLXSW_REG_PMLP_LEN];
1000
1001 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
1002 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
1003 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
1004 }
1005
mlxsw_sp_port_open(struct net_device * dev)1006 static int mlxsw_sp_port_open(struct net_device *dev)
1007 {
1008 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1009 int err;
1010
1011 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1012 if (err)
1013 return err;
1014 netif_start_queue(dev);
1015 return 0;
1016 }
1017
mlxsw_sp_port_stop(struct net_device * dev)1018 static int mlxsw_sp_port_stop(struct net_device *dev)
1019 {
1020 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1021
1022 netif_stop_queue(dev);
1023 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1024 }
1025
/* ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the
 * core for transmission, maintaining per-CPU software Tx counters.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Reallocate if the skb lacks headroom for the Tx header; the
	 * original skb is released either way.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* eth_skb_pad() frees the skb on failure, so only the drop
	 * counter is updated here.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1082
/* ndo_set_rx_mode: intentionally empty; no Rx mode configuration is
 * performed for these ports.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
1086
mlxsw_sp_port_set_mac_address(struct net_device * dev,void * p)1087 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1088 {
1089 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1090 struct sockaddr *addr = p;
1091 int err;
1092
1093 if (!is_valid_ether_addr(addr->sa_data))
1094 return -EADDRNOTAVAIL;
1095
1096 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1097 if (err)
1098 return err;
1099 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1100 return 0;
1101 }
1102
/* Buffer threshold in cells: twice the MTU's worth of cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
1108
1109 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
1110
/* Compute the PFC delay allowance in cells. @delay arrives in bits
 * (hence the division by BITS_PER_BYTE); the result is scaled by the
 * cell factor and padded with one MTU's worth of cells.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
1119
1120 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1121 * Assumes 100m cable and maximum MTU.
1122 */
1123 #define MLXSW_SP_PAUSE_DELAY 58752
1124
/* Delay allowance in cells for a priority group buffer: PFC-derived
 * when PFC is enabled, the fixed pause allowance when global pause is
 * enabled, and zero for lossy buffers.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);

	if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);

	return 0;
}
1135
/* Pack one priority group buffer record into the PBMC payload,
 * choosing the lossless form (with threshold) or the lossy one.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (!lossy)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
}
1145
/* Configure the port's headroom buffers (PBMC) for the given MTU,
 * priority to traffic-class mapping and pause/PFC state. Only buffers
 * that back at least one priority are (re)configured; each is sized
 * for threshold plus delay and marked lossy unless protected by PFC
 * or global pause.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read back the current configuration so buffers we skip keep
	 * their existing settings.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		bool lossy;

		/* Use the first priority mapped to buffer i to decide
		 * whether PFC applies to it.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		/* Lossless only when PFC or global pause protects it. */
		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
1189
/* Set the port headroom using the port's DCB (ETS/PFC) configuration
 * when present, or an all-zero priority to TC mapping otherwise.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	struct ieee_pfc *my_pfc = NULL;
	u8 *prio_tc = def_prio_tc;

	if (mlxsw_sp_port->dcb.ets) {
		prio_tc = mlxsw_sp_port->dcb.ets->prio_tc;
		my_pfc = mlxsw_sp_port->dcb.pfc;
	}

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
1204
/* ndo_change_mtu: update the headroom buffers, the mirroring (SPAN)
 * buffers and the port MTU, in that order. On failure, the already
 * completed steps are rolled back using the old dev->mtu, which is
 * only updated after all steps succeed.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1229
/* Accumulate the per-CPU software counters into @stats. Fields are
 * added with "+=", so this assumes @stats starts zeroed — the stats
 * structure is conventionally pre-zeroed by the core; TODO confirm at
 * call sites.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Re-read until no writer raced with the snapshot. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
1261
mlxsw_sp_port_has_offload_stats(const struct net_device * dev,int attr_id)1262 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1263 {
1264 switch (attr_id) {
1265 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1266 return true;
1267 }
1268
1269 return false;
1270 }
1271
mlxsw_sp_port_get_offload_stats(int attr_id,const struct net_device * dev,void * sp)1272 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1273 void *sp)
1274 {
1275 switch (attr_id) {
1276 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1277 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1278 }
1279
1280 return -EINVAL;
1281 }
1282
mlxsw_sp_port_get_stats_raw(struct net_device * dev,int grp,int prio,char * ppcnt_pl)1283 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1284 int prio, char *ppcnt_pl)
1285 {
1286 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1287 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1288
1289 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1290 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1291 }
1292
mlxsw_sp_port_get_hw_stats(struct net_device * dev,struct rtnl_link_stats64 * stats)1293 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1294 struct rtnl_link_stats64 *stats)
1295 {
1296 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1297 int err;
1298
1299 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1300 0, ppcnt_pl);
1301 if (err)
1302 goto out;
1303
1304 stats->tx_packets =
1305 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1306 stats->rx_packets =
1307 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1308 stats->tx_bytes =
1309 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1310 stats->rx_bytes =
1311 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1312 stats->multicast =
1313 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1314
1315 stats->rx_crc_errors =
1316 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1317 stats->rx_frame_errors =
1318 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1319
1320 stats->rx_length_errors = (
1321 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1322 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1323 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1324
1325 stats->rx_errors = (stats->rx_crc_errors +
1326 stats->rx_frame_errors + stats->rx_length_errors);
1327
1328 out:
1329 return err;
1330 }
1331
update_stats_cache(struct work_struct * work)1332 static void update_stats_cache(struct work_struct *work)
1333 {
1334 struct mlxsw_sp_port *mlxsw_sp_port =
1335 container_of(work, struct mlxsw_sp_port,
1336 hw_stats.update_dw.work);
1337
1338 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1339 goto out;
1340
1341 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1342 mlxsw_sp_port->hw_stats.cache);
1343
1344 out:
1345 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1346 MLXSW_HW_STATS_UPDATE_TIME);
1347 }
1348
1349 /* Return the stats from a cache that is updated periodically,
1350 * as this function might get called in an atomic context.
1351 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* Copy the snapshot maintained by update_stats_cache(). */
	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
1360
/* Program VLAN membership for [vid_begin, vid_end] with a single SPVM
 * write. The register payload is heap-allocated and freed on return.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(spvm),
			      spvm_pl);
	kfree(spvm_pl);
	return err;
}
1379
/* Set VLAN membership for the range [vid_begin, vid_end], splitting it
 * into chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per
 * register write. Returns on the first failing chunk without undoing
 * previously written ones.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		/* Clamp the chunk end to the overall range end. */
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
1399
/* Release the port's reference to every VLAN on it. The _safe
 * iterator is required because mlxsw_sp_port_vlan_put() may unlink
 * and free the entry.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1408
/* Create a {port, VID} entry: join the VLAN in hardware first, then
 * allocate and link the tracking structure with an initial reference.
 * VID 1 is configured as untagged. The hardware join is undone if the
 * allocation fails.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Roll back the hardware VLAN membership. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1437
/* Unlink and free a {port, VID} entry, then remove the VLAN
 * membership from hardware.
 */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	/* Saved before the entry is freed below. */
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1448
1449 struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port * mlxsw_sp_port,u16 vid)1450 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1451 {
1452 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1453
1454 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1455 if (mlxsw_sp_port_vlan) {
1456 mlxsw_sp_port_vlan->ref_count++;
1457 return mlxsw_sp_port_vlan;
1458 }
1459
1460 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1461 }
1462
/* Drop a reference to a {port, VID} entry. On the last reference the
 * entry is detached from its bridge port or router FID and destroyed.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	/* Read before the entry can be torn down below. */
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1477
/* ndo_vlan_rx_add_vid: take (or create) a reference on the VLAN. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan);
}
1491
/* ndo_vlan_rx_kill_vid: release the reference taken in add_vid. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1511
/* ndo_get_phys_port_name: "p<module>" for a regular port, or
 * "p<module>s<subport>" for a split port. Fails if the name does not
 * fit in @len.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int n;

	if (mlxsw_sp_port->split)
		n = snprintf(name, len, "p%ds%d", module + 1, lane / width);
	else
		n = snprintf(name, len, "p%d", module + 1);

	/* Reject truncated names. */
	if (n >= len)
		return -EINVAL;

	return 0;
}
1532
1533 static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port * port,unsigned long cookie)1534 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1535 unsigned long cookie) {
1536 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1537
1538 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1539 if (mall_tc_entry->cookie == cookie)
1540 return mall_tc_entry;
1541
1542 return NULL;
1543 }
1544
/* Offload a matchall mirror action: resolve the mirred destination
 * netdev, verify it is another Spectrum port, and set up a SPAN
 * session towards it.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	/* Mirroring is only offloadable between ports of this driver. */
	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	/* Record the destination for later removal. */
	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}
1575
1576 static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_port_mall_mirror_tc_entry * mirror)1577 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1578 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1579 {
1580 enum mlxsw_sp_span_type span_type;
1581
1582 span_type = mirror->ingress ?
1583 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1584 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
1585 span_type);
1586 }
1587
/* Offload a matchall sample action. Only one sampler per port is
 * supported, and the rate must fit the MPSC register. The psample
 * group pointer doubles as the "sampling active" marker and is
 * cleared again if enabling sampling in hardware fails.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the group and cache the sampling parameters before
	 * enabling sampling in hardware.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1622
1623 static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port * mlxsw_sp_port)1624 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1625 {
1626 if (!mlxsw_sp_port->sample)
1627 return;
1628
1629 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1630 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1631 }
1632
/* Offload a matchall classifier. Exactly one action is supported:
 * either an egress mirror or a sample action, both only for
 * protocol ETH_P_ALL. The tracking entry is linked only after the
 * action was successfully offloaded.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	/* Single action guaranteed above; take the first list entry. */
	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1681
/* Remove an offloaded matchall classifier: look up its entry by
 * cookie, undo the type-specific offload, and free the entry.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		/* Unknown entry type indicates a driver bug. */
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1709
mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_cls_matchall_offload * f)1710 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1711 struct tc_cls_matchall_offload *f)
1712 {
1713 bool ingress;
1714
1715 if (is_classid_clsact_ingress(f->common.classid))
1716 ingress = true;
1717 else if (is_classid_clsact_egress(f->common.classid))
1718 ingress = false;
1719 else
1720 return -EOPNOTSUPP;
1721
1722 if (f->common.chain_index)
1723 return -EOPNOTSUPP;
1724
1725 switch (f->command) {
1726 case TC_CLSMATCHALL_REPLACE:
1727 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1728 ingress);
1729 case TC_CLSMATCHALL_DESTROY:
1730 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1731 return 0;
1732 default:
1733 return -EOPNOTSUPP;
1734 }
1735 }
1736
1737 static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port * mlxsw_sp_port,struct tc_cls_flower_offload * f)1738 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
1739 struct tc_cls_flower_offload *f)
1740 {
1741 bool ingress;
1742
1743 if (is_classid_clsact_ingress(f->common.classid))
1744 ingress = true;
1745 else if (is_classid_clsact_egress(f->common.classid))
1746 ingress = false;
1747 else
1748 return -EOPNOTSUPP;
1749
1750 switch (f->command) {
1751 case TC_CLSFLOWER_REPLACE:
1752 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
1753 case TC_CLSFLOWER_DESTROY:
1754 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1755 return 0;
1756 case TC_CLSFLOWER_STATS:
1757 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
1758 default:
1759 return -EOPNOTSUPP;
1760 }
1761 }
1762
mlxsw_sp_setup_tc(struct net_device * dev,enum tc_setup_type type,void * type_data)1763 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1764 void *type_data)
1765 {
1766 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1767
1768 switch (type) {
1769 case TC_SETUP_CLSMATCHALL:
1770 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
1771 case TC_SETUP_CLSFLOWER:
1772 return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
1773 default:
1774 return -EOPNOTSUPP;
1775 }
1776 }
1777
/* netdev entry points for a Spectrum front-panel port. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
1793
/* ethtool get_drvinfo: report driver name/version, firmware revision
 * and bus information.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1811
mlxsw_sp_port_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * pause)1812 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1813 struct ethtool_pauseparam *pause)
1814 {
1815 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1816
1817 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1818 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1819 }
1820
mlxsw_sp_port_pause_set(struct mlxsw_sp_port * mlxsw_sp_port,struct ethtool_pauseparam * pause)1821 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1822 struct ethtool_pauseparam *pause)
1823 {
1824 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1825
1826 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1827 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1828 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1829
1830 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1831 pfcc_pl);
1832 }
1833
mlxsw_sp_port_set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * pause)1834 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1835 struct ethtool_pauseparam *pause)
1836 {
1837 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1838 bool pause_en = pause->tx_pause || pause->rx_pause;
1839 int err;
1840
1841 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1842 netdev_err(dev, "PFC already enabled on port\n");
1843 return -EINVAL;
1844 }
1845
1846 if (pause->autoneg) {
1847 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1848 return -EINVAL;
1849 }
1850
1851 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1852 if (err) {
1853 netdev_err(dev, "Failed to configure port's headroom\n");
1854 return err;
1855 }
1856
1857 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1858 if (err) {
1859 netdev_err(dev, "Failed to set PAUSE parameters\n");
1860 goto err_port_pause_configure;
1861 }
1862
1863 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1864 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1865
1866 return 0;
1867
1868 err_port_pause_configure:
1869 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1870 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1871 return err;
1872 }
1873
/* One ethtool statistic: its name, the PPCNT payload accessor that
 * extracts it, and whether the raw value is in cells (needing a
 * cells-to-bytes conversion before being reported).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};
1879
/* IEEE 802.3 counter group (PPCNT): per-port MAC-level statistics. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

/* Number of IEEE 802.3 counters exposed per port */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1960
/* Per-priority counter group (PPCNT): repeated once for each of the
 * IEEE_8021QAZ_MAX_TCS priorities; names get a "_<prio>" suffix.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

/* Number of per-priority counters (per priority) */
#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1997
/* Per-TC counter group (PPCNT): repeated once for each traffic class.
 * The transmit-queue depth is reported by hardware in cells and is
 * converted to bytes (cells_bytes = true).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

/* Number of per-TC counters (per traffic class) */
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2011
/* Total ethtool stats per port: the IEEE group once, plus the prio and
 * TC groups repeated for every one of the 8 traffic classes.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
2016
/* Emit the per-priority stat names suffixed with "_<prio>", advancing
 * the caller's cursor one ETH_GSTRING_LEN slot per name.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	     i++, *p += ETH_GSTRING_LEN)
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
}
2027
/* Emit the per-TC stat names suffixed with "_<tc>", advancing the
 * caller's cursor one ETH_GSTRING_LEN slot per name.
 */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN;
	     i++, *p += ETH_GSTRING_LEN)
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
}
2038
/* ethtool get_strings: write the stat names in the exact order that
 * mlxsw_sp_port_get_stats() fills the values.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
		memcpy(p, mlxsw_sp_port_hw_stats[i].str, ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port_get_prio_strings(&p, i);

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port_get_tc_strings(&p, i);
}
2062
mlxsw_sp_port_set_phys_id(struct net_device * dev,enum ethtool_phys_id_state state)2063 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2064 enum ethtool_phys_id_state state)
2065 {
2066 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2067 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2068 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2069 bool active;
2070
2071 switch (state) {
2072 case ETHTOOL_ID_ACTIVE:
2073 active = true;
2074 break;
2075 case ETHTOOL_ID_INACTIVE:
2076 active = false;
2077 break;
2078 default:
2079 return -EOPNOTSUPP;
2080 }
2081
2082 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2083 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2084 }
2085
2086 static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats ** p_hw_stats,int * p_len,enum mlxsw_reg_ppcnt_grp grp)2087 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2088 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2089 {
2090 switch (grp) {
2091 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2092 *p_hw_stats = mlxsw_sp_port_hw_stats;
2093 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2094 break;
2095 case MLXSW_REG_PPCNT_PRIO_CNT:
2096 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2097 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2098 break;
2099 case MLXSW_REG_PPCNT_TC_CNT:
2100 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2101 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2102 break;
2103 default:
2104 WARN_ON(1);
2105 return -EOPNOTSUPP;
2106 }
2107 return 0;
2108 }
2109
/* Read one PPCNT counter group into data[data_index..], converting
 * cell-based counters to bytes.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int len;
	int idx;

	if (mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp))
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (idx = 0; idx < len; idx++) {
		u64 val = hw_stats[idx].getter(ppcnt_pl);

		if (hw_stats[idx].cells_bytes)
			val = mlxsw_sp_cells_bytes(mlxsw_sp, val);
		data[data_index + idx] = val;
	}
}
2133
/* ethtool get_ethtool_stats: IEEE group first, then the per-priority
 * and per-TC groups for each of the 8 traffic classes (same order as
 * mlxsw_sp_port_get_strings()).
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int data_index = 0;
	int prio, tc;

	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_STATS_LEN;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, prio,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	for (tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, tc,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}
2158
mlxsw_sp_port_get_sset_count(struct net_device * dev,int sset)2159 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2160 {
2161 switch (sset) {
2162 case ETH_SS_STATS:
2163 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2164 default:
2165 return -EOPNOTSUPP;
2166 }
2167 }
2168
/* Mapping between a PTYS protocol capability bitmask, the equivalent
 * ethtool link-mode bit, and the speed in Mb/s it represents.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};
2174
2175 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2176 {
2177 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2178 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2179 .speed = SPEED_100,
2180 },
2181 {
2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2183 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2184 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2185 .speed = SPEED_1000,
2186 },
2187 {
2188 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2189 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2190 .speed = SPEED_10000,
2191 },
2192 {
2193 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2194 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2195 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2196 .speed = SPEED_10000,
2197 },
2198 {
2199 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2200 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2201 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2202 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2203 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2204 .speed = SPEED_10000,
2205 },
2206 {
2207 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2208 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2209 .speed = SPEED_20000,
2210 },
2211 {
2212 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2213 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2214 .speed = SPEED_40000,
2215 },
2216 {
2217 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2218 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2219 .speed = SPEED_40000,
2220 },
2221 {
2222 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2223 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2224 .speed = SPEED_40000,
2225 },
2226 {
2227 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2228 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2229 .speed = SPEED_40000,
2230 },
2231 {
2232 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2233 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2234 .speed = SPEED_25000,
2235 },
2236 {
2237 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2238 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2239 .speed = SPEED_25000,
2240 },
2241 {
2242 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2243 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2244 .speed = SPEED_25000,
2245 },
2246 {
2247 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2248 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2249 .speed = SPEED_25000,
2250 },
2251 {
2252 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2253 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2254 .speed = SPEED_50000,
2255 },
2256 {
2257 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2258 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2259 .speed = SPEED_50000,
2260 },
2261 {
2262 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2263 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2264 .speed = SPEED_50000,
2265 },
2266 {
2267 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2268 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2269 .speed = SPEED_56000,
2270 },
2271 {
2272 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2273 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2274 .speed = SPEED_56000,
2275 },
2276 {
2277 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2278 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2279 .speed = SPEED_56000,
2280 },
2281 {
2282 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2283 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2284 .speed = SPEED_56000,
2285 },
2286 {
2287 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2288 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2289 .speed = SPEED_100000,
2290 },
2291 {
2292 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2293 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2294 .speed = SPEED_100000,
2295 },
2296 {
2297 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2298 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2299 .speed = SPEED_100000,
2300 },
2301 {
2302 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2303 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2304 .speed = SPEED_100000,
2305 },
2306 };
2307
2308 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2309
2310 static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,struct ethtool_link_ksettings * cmd)2311 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2312 struct ethtool_link_ksettings *cmd)
2313 {
2314 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2315 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2316 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2317 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2318 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2319 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2320 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2321
2322 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2323 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2324 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2325 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2326 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2327 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2328 }
2329
/* Translate a PTYS protocol bitmask into ethtool link-mode bits. */
static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	const struct mlxsw_sp_port_link_mode *lm;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		lm = &mlxsw_sp_port_link_mode[i];
		if (ptys_eth_proto & lm->mask)
			__set_bit(lm->mask_ethtool, mode);
	}
}
2340
/* Derive speed/duplex from the operational PTYS mask: the first table
 * entry matching an operational bit wins. Without carrier both are
 * reported as unknown.
 */
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	int i;

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (!(ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask))
			continue;
		cmd->base.speed = mlxsw_sp_port_link_mode[i].speed;
		cmd->base.duplex = DUPLEX_FULL;
		return;
	}
}
2362
mlxsw_sp_port_connector_port(u32 ptys_eth_proto)2363 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2364 {
2365 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2366 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2367 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2368 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2369 return PORT_FIBRE;
2370
2371 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2372 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2373 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2374 return PORT_DA;
2375
2376 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2377 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2378 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2379 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2380 return PORT_NONE;
2381
2382 return PORT_OTHER;
2383 }
2384
2385 static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings * cmd)2386 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2387 {
2388 u32 ptys_proto = 0;
2389 int i;
2390
2391 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2392 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2393 cmd->link_modes.advertising))
2394 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2395 }
2396 return ptys_proto;
2397 }
2398
/* Build a PTYS protocol mask covering every mode of exactly 'speed'. */
static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++)
		if (mlxsw_sp_port_link_mode[i].speed == speed)
			proto |= mlxsw_sp_port_link_mode[i].mask;

	return proto;
}
2410
/* Build a PTYS protocol mask covering every mode at or below
 * 'upper_speed'.
 */
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++)
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			proto |= mlxsw_sp_port_link_mode[i].mask;

	return proto;
}
2422
/* Fill cmd->link_modes.supported: pause capabilities, autoneg, the
 * port types and every speed the capability mask allows.
 */
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
}
2433
/* Fill cmd->link_modes.advertising from the admin PTYS mask; modes are
 * only advertised when autoneg is enabled.
 */
static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		mlxsw_sp_from_ptys_link(eth_proto_admin,
					cmd->link_modes.advertising);
	}
}
2443
2444 static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp,u8 autoneg_status,struct ethtool_link_ksettings * cmd)2445 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2446 struct ethtool_link_ksettings *cmd)
2447 {
2448 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2449 return;
2450
2451 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2452 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2453 }
2454
mlxsw_sp_port_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)2455 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2456 struct ethtool_link_ksettings *cmd)
2457 {
2458 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2459 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2460 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2461 char ptys_pl[MLXSW_REG_PTYS_LEN];
2462 u8 autoneg_status;
2463 bool autoneg;
2464 int err;
2465
2466 autoneg = mlxsw_sp_port->link.autoneg;
2467 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2468 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2469 if (err)
2470 return err;
2471 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin,
2472 ð_proto_oper);
2473
2474 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2475
2476 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2477
2478 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2479 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2480 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2481
2482 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2483 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2484 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2485 cmd);
2486
2487 return 0;
2488 }
2489
2490 static int
mlxsw_sp_port_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)2491 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2492 const struct ethtool_link_ksettings *cmd)
2493 {
2494 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2495 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2496 char ptys_pl[MLXSW_REG_PTYS_LEN];
2497 u32 eth_proto_cap, eth_proto_new;
2498 bool autoneg;
2499 int err;
2500
2501 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2502 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2503 if (err)
2504 return err;
2505 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL);
2506
2507 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2508 if (!autoneg && cmd->base.speed == SPEED_56000) {
2509 netdev_err(dev, "56G not supported with autoneg off\n");
2510 return -EINVAL;
2511 }
2512 eth_proto_new = autoneg ?
2513 mlxsw_sp_to_ptys_advert_link(cmd) :
2514 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2515
2516 eth_proto_new = eth_proto_new & eth_proto_cap;
2517 if (!eth_proto_new) {
2518 netdev_err(dev, "No supported speed requested\n");
2519 return -EINVAL;
2520 }
2521
2522 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2523 eth_proto_new);
2524 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2525 if (err)
2526 return err;
2527
2528 mlxsw_sp_port->link.autoneg = autoneg;
2529
2530 if (!netif_running(dev))
2531 return 0;
2532
2533 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2534 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2535
2536 return 0;
2537 }
2538
/* ethtool -f: flash port firmware. Only a whole-image flash is
 * supported. The RTNL lock is dropped for the (long) firmware request
 * and flash; dev_hold() keeps the netdev alive across the unlocked
 * window, and the lock is re-taken before returning as the ethtool
 * core expects.
 */
static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	/* Keep the device referenced while RTNL is released */
	dev_hold(dev);
	rtnl_unlock();

	/* _direct: no fallback to the user-mode firmware helper */
	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	rtnl_lock();
	dev_put(dev);
	return err;
}
2563
/* SFF module EEPROM is addressed as two 256-byte pages behind two I2C
 * addresses: 0x50 for the low page, 0x51 for the high page.
 */
#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

/* Read up to 'size' bytes of module EEPROM at 'offset' via the MCIA
 * register. A single read never crosses the 256-byte page boundary;
 * the actual number of bytes copied is returned in *p_read_size so the
 * caller can loop. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	/* MCIA carries at most MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes */
	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		/* High page: switch I2C address and rebase the offset */
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	/* Non-zero MCIA status means the module access itself failed */
	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}
2609
/* SFF-8024 compliance revision values read from module EEPROM byte 1 */
enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
};

/* SFF-8024 module identifier values read from module EEPROM byte 0 */
enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
};

/* Byte offsets of the identifier and revision in the EEPROM header */
enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};
2628
mlxsw_sp_get_module_info(struct net_device * netdev,struct ethtool_modinfo * modinfo)2629 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2630 struct ethtool_modinfo *modinfo)
2631 {
2632 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2633 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2634 u8 module_rev_id, module_id;
2635 unsigned int read_size;
2636 int err;
2637
2638 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2639 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2640 module_info, &read_size);
2641 if (err)
2642 return err;
2643
2644 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2645 return -EIO;
2646
2647 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2648 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2649
2650 switch (module_id) {
2651 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2652 modinfo->type = ETH_MODULE_SFF_8436;
2653 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2654 break;
2655 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2656 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2657 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2658 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2659 modinfo->type = ETH_MODULE_SFF_8636;
2660 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2661 } else {
2662 modinfo->type = ETH_MODULE_SFF_8436;
2663 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2664 }
2665 break;
2666 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2667 modinfo->type = ETH_MODULE_SFF_8472;
2668 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2669 break;
2670 default:
2671 return -EINVAL;
2672 }
2673
2674 return 0;
2675 }
2676
/* ethtool get_module_eeprom: loop over page-bounded MCIA reads until
 * the requested window has been copied out.
 */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	unsigned int bytes_read = 0;
	unsigned int read_size;
	int err;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (bytes_read < ee->len) {
		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port,
						   ee->offset + bytes_read,
						   ee->len - bytes_read,
						   data + bytes_read,
						   &read_size);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
			return err;
		}

		bytes_read += read_size;
	}

	return 0;
}
2707
/* ethtool callbacks for a Spectrum switch port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.flash_device = mlxsw_sp_flash_device,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
};
2723
2724 static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port * mlxsw_sp_port,u8 width)2725 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2726 {
2727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2728 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2729 char ptys_pl[MLXSW_REG_PTYS_LEN];
2730 u32 eth_proto_admin;
2731
2732 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2733 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2734 eth_proto_admin);
2735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2736 }
2737
/* Configure one element of the port's ETS scheduling hierarchy (QEEC):
 * link it under next_index and set its DWRR mode and weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	/* DWRR configuration enable */
	mlxsw_reg_qeec_de_set(qeec_pl, true);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2752
/* Set the maximum shaper rate of one ETS hierarchy element (QEEC).
 * @maxrate may be MLXSW_REG_QEEC_MAS_DIS to disable shaping.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* mase: mark the max-shaper fields of the payload as valid */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2766
/* Map one switch priority to a traffic class on this port (QTCT register).
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qtct),
			       qtct_pl);
}
2777
/* Build the port's default ETS scheduling tree: each TC linked to one
 * subgroup, all subgroups members of a single group; disable all max
 * shapers and map every priority to TC 0. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		/* TC elements i + 8 exist alongside i; presumably the
		 * second set backs egress TCs - TODO confirm against the
		 * QEEC register documentation.
		 */
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
2846
/* Create and register the netdev for local port @local_port, mapping it
 * to @module/@width/@lane and initializing SWID, MAC, MTU, buffers, ETS,
 * DCB, FIDs and default VLAN 1. The error path unwinds each step in
 * reverse order via the goto ladder below, so the labels must stay in
 * initialization order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = 1;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	/* Periodic HW stats refresh; first run is scheduled only after
	 * registration succeeds (see mlxsw_core_schedule_dw() below).
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	/* Publish the port in the ports array before register_netdev() so
	 * callbacks fired during registration can look it up.
	 */
	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
3051
/* Tear down a port created by mlxsw_sp_port_create(), in reverse order of
 * initialization. The stats work is cancelled first so it cannot run
 * against a half-destroyed port.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been released by the flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
3073
mlxsw_sp_port_created(struct mlxsw_sp * mlxsw_sp,u8 local_port)3074 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3075 {
3076 return mlxsw_sp->ports[local_port] != NULL;
3077 }
3078
mlxsw_sp_ports_remove(struct mlxsw_sp * mlxsw_sp)3079 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3080 {
3081 int i;
3082
3083 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3084 if (mlxsw_sp_port_created(mlxsw_sp, i))
3085 mlxsw_sp_port_remove(mlxsw_sp, i);
3086 kfree(mlxsw_sp->port_to_module);
3087 kfree(mlxsw_sp->ports);
3088 }
3089
/* Allocate the per-port arrays and create a netdev for every local port
 * that has a module mapped (width != 0). On failure, previously created
 * ports are removed in reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	/* Remembers each port's module for later re-creation on unsplit. */
	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;	/* no module behind this local port */
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}
3134
/* Round a (1-based) local port number down to the first port of the
 * cluster it belongs to.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	return local_port -
	       (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
}
3141
/* Create @count split ports starting at @base_port, dividing the module's
 * lanes evenly among them. On failure, already-created split ports are
 * removed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}
3163
/* Re-create the full-width (unsplit) ports that @count split ports at
 * @base_port replaced. Best-effort: this runs on error/unsplit paths
 * where there is no caller to report failure to, so failures are logged
 * rather than returned (the original silently discarded them).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int err;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		err = mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
					   width, 0);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to re-create port after unsplit\n",
				local_port);
	}
}
3183
/* devlink port split handler: split @local_port into @count (2 or 4)
 * ports. Validates that the port uses all module lanes and that the
 * neighbouring (slave) local ports are free, removes the affected ports
 * and re-creates them split. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a port owning the module's full lane count can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best effort: restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
3245
/* devlink port unsplit handler: remove the split ports sharing the
 * cluster with @local_port and re-create the original unsplit port(s).
 * The split factor is inferred from the port's lane width. Returns 0 or
 * a negative errno.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* Width 1 means the port was split by four, otherwise by two. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
3283
mlxsw_sp_pude_event_func(const struct mlxsw_reg_info * reg,char * pude_pl,void * priv)3284 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3285 char *pude_pl, void *priv)
3286 {
3287 struct mlxsw_sp *mlxsw_sp = priv;
3288 struct mlxsw_sp_port *mlxsw_sp_port;
3289 enum mlxsw_reg_pude_oper_status status;
3290 u8 local_port;
3291
3292 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3293 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3294 if (!mlxsw_sp_port)
3295 return;
3296
3297 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3298 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3299 netdev_info(mlxsw_sp_port->dev, "link up\n");
3300 netif_carrier_on(mlxsw_sp_port->dev);
3301 } else {
3302 netdev_info(mlxsw_sp_port->dev, "link down\n");
3303 netif_carrier_off(mlxsw_sp_port->dev);
3304 }
3305 }
3306
/* Rx trap handler: account the packet in per-CPU stats and deliver it to
 * the stack on the receiving port's netdev. Consumes @skb.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64 stats require the syncp begin/end bracket on 32-bit. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
3331
/* Like the no-mark variant, but set offload_fwd_mark so the bridge does
 * not forward again a packet the device already forwarded in hardware.
 */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	/* Plain call: "return <void expr>;" from a void function is a GNU
	 * extension, not ISO C (C11 6.8.6.4).
	 */
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
3338
/* Rx handler for sampled packets: hand the (possibly truncated) packet to
 * the psample subsystem, then consume it. The psample group pointer is
 * RCU-protected, hence the read-side lock around the dereference.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
		       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;	/* sampling disabled meanwhile */
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
3373
/* Shorthands for declaring Spectrum trap listeners. The _MARK variant
 * routes packets through the handler that sets skb->offload_fwd_mark;
 * all Rx listeners discard when the trap group cannot be serviced.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3384
/* All traps/events registered by this driver, grouped by layer; see
 * mlxsw_sp_traps_init() for registration and mlxsw_sp_cpu_policers_set()
 * for the per-group rate limits.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
};
3449
/* Program a rate-limiting policer (QPCR) for every trap group the driver
 * uses; policer index == trap group index (see mlxsw_sp_trap_groups_set()).
 * Groups not listed in the switch keep their default. Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;	/* rates below are packets/sec */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;	/* group unused by this driver */
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
3508
/* Assign each trap group its CPU priority, traffic class and policer
 * (HTGT register). Policer indices mirror mlxsw_sp_cpu_policers_set();
 * events are not policed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;	/* by convention policer index == group */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;	/* group unused by this driver */
		}

		/* Sanity: the implied policer must actually exist. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
3578
/* Configure CPU policers and trap groups, then register every listener in
 * mlxsw_sp_listener[]. On partial failure, already-registered listeners
 * are unregistered. Returns 0 or a negative errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	/* Unwind only the listeners registered before the failure. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}
3610
/* Unregister all listeners registered by mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}
3621
/* Configure the LAG hash fields (SLCR) and allocate the per-LAG upper
 * tracking array sized by the device's MAX_LAG resource. Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	/* Hash on L2, VLAN, L3 and L4 fields for member selection. */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
3652
/* Release the LAG tracking array allocated in mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3657
/* Configure the EMAD trap group with defaults and no policer, so that
 * register access over EMAD works before the full trap setup runs.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
3668
/* Main init routine for a Spectrum ASIC instance, invoked by the mlxsw
 * core once the underlying bus is up.  Subsystems are initialized in
 * dependency order (FIDs -> traps -> buffers -> LAG -> switchdev ->
 * router -> SPAN -> ACL -> counters -> dpipe -> ports); on failure the
 * goto ladder tears down the already-initialized ones in exact reverse
 * order.  Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	/* Must run first: may flash newer firmware and reset the device. */
	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last so every subsystem they rely on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
	return err;
}
3780
/* Teardown counterpart of mlxsw_sp_init(); subsystems are finalized in
 * exact reverse order of their initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
3797
/* Device configuration profile handed to the mlxsw core during probe.
 * Fields with used_* set to 1 are programmed into the device; the rest
 * keep firmware defaults.  NOTE(review): individual field semantics are
 * defined by the CONFIG_PROFILE command in the device's programming
 * manual — confirm there before changing any value.
 */
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};
3829
/* Driver operations registered with the mlxsw core: lifecycle hooks,
 * port split, shared-buffer (sb_*) devlink callbacks and TX header
 * construction for packets sent from the CPU.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
3852
/* Return true if @dev is a netdev created by this driver, identified by
 * its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
3857
/* Callback for netdev_walk_all_lower_dev{,_rcu}(): when the visited
 * lower device is an mlxsw_sp port, store its private struct through
 * @data and return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_port = netdev_priv(lower_dev);
	return 1;
}
3870
mlxsw_sp_port_dev_lower_find(struct net_device * dev)3871 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3872 {
3873 struct mlxsw_sp_port *mlxsw_sp_port;
3874
3875 if (mlxsw_sp_port_dev_check(dev))
3876 return netdev_priv(dev);
3877
3878 mlxsw_sp_port = NULL;
3879 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3880
3881 return mlxsw_sp_port;
3882 }
3883
mlxsw_sp_lower_get(struct net_device * dev)3884 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3885 {
3886 struct mlxsw_sp_port *mlxsw_sp_port;
3887
3888 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3889 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3890 }
3891
mlxsw_sp_port_dev_lower_find_rcu(struct net_device * dev)3892 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3893 {
3894 struct mlxsw_sp_port *mlxsw_sp_port;
3895
3896 if (mlxsw_sp_port_dev_check(dev))
3897 return netdev_priv(dev);
3898
3899 mlxsw_sp_port = NULL;
3900 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3901 &mlxsw_sp_port);
3902
3903 return mlxsw_sp_port;
3904 }
3905
/* Like mlxsw_sp_port_dev_lower_find() but safe without RTNL: the lookup
 * runs under rcu_read_lock() and a reference is taken on the found port's
 * netdev before the lock is dropped.  Caller must release it with
 * mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
3917
/* Release the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3922
/* When a port leaves a LAG, make it leave any bridge the LAG itself is a
 * port of, as well as bridges reached through the LAG's direct uppers
 * (e.g. VLAN devices on top of the LAG).
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
3941
/* Create LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3949
/* Destroy LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3957
/* Map the port into slot @port_index of LAG @lag_id as a collector
 * (SLCOR register).  Does not yet enable collection/distribution.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3968
/* Remove the port's collector mapping from LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3979
/* Enable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3990
/* Disable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
4001
/* Resolve a LAG index for @lag_dev: return the index already tracking
 * this netdev if one exists, otherwise the first unused index.  Returns
 * -EBUSY when the device is not tracked and no free index remains.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	int unused_id = -1;
	u64 lag_count;
	int i;

	lag_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < lag_count; i++) {
		struct mlxsw_sp_upper *lag = mlxsw_sp_lag_get(mlxsw_sp, i);

		if (!lag->ref_count) {
			/* Remember the first free slot as a fallback. */
			if (unused_id < 0)
				unused_id = i;
			continue;
		}
		if (lag->dev == lag_dev) {
			*p_lag_id = i;
			return 0;
		}
	}

	if (unused_id < 0)
		return -EBUSY;
	*p_lag_id = unused_id;
	return 0;
}
4028
4029 static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev,struct netdev_lag_upper_info * lag_upper_info)4030 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4031 struct net_device *lag_dev,
4032 struct netdev_lag_upper_info *lag_upper_info)
4033 {
4034 u16 lag_id;
4035
4036 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
4037 return false;
4038 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
4039 return false;
4040 return true;
4041 }
4042
/* Find a free member slot inside LAG @lag_id.  On success the slot index
 * is stored in @p_port_index; -EBUSY when the LAG is full.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 member_count;
	int i;

	member_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG_MEMBERS);
	for (i = 0; i < member_count; i++) {
		if (mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i))
			continue;
		*p_port_index = i;
		return 0;
	}
	return -EBUSY;
}
4059
/* Enslave the port to LAG netdev @lag_dev: resolve (or create) the
 * device LAG, add the port as a collector member, record the mapping in
 * the core and mark the port as lagged.  On failure a LAG created by
 * this call (ref_count still 0) is destroyed again.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member: instantiate the LAG in hardware. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_add:
	/* Only destroy a LAG this call just created. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
4106
/* Undo mlxsw_sp_port_lag_join(): remove the port's collector mapping,
 * flush its VLANs, detach the LAG hierarchy from bridges, and destroy
 * the device LAG when this was the last member.  Finally restore the
 * port's default VLAN 1 so untagged traffic can ingress again.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
4140
/* Add the port to LAG @lag_id's distributor list (SLDR register) so
 * egress traffic may be hashed to it.
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4151
/* Remove the port from LAG @lag_id's distributor list (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4162
/* Activate the port in its LAG: enable collection first, then add it to
 * the distributor list.  On distributor failure, collection is disabled
 * again so the port is left fully inactive.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
4183
/* Deactivate the port in its LAG: stop distributing first, then disable
 * collection.  If disabling collection fails, the port is re-added to
 * the distributor list to restore a consistent active state.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
4205
mlxsw_sp_port_lag_changed(struct mlxsw_sp_port * mlxsw_sp_port,struct netdev_lag_lower_state_info * info)4206 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4207 struct netdev_lag_lower_state_info *info)
4208 {
4209 if (info->tx_enabled)
4210 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4211 else
4212 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4213 }
4214
/* Set the port's STP state for every VID (0..4095) in one SPMS register
 * write: forwarding when @enable is true, discarding otherwise.  The
 * payload buffer is heap-allocated because SPMS is too large for the
 * stack.  Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state state;
	char *pl;
	u16 vid;
	int err;

	if (enable)
		state = MLXSW_REG_SPMS_STATE_FORWARDING;
	else
		state = MLXSW_REG_SPMS_STATE_DISCARDING;

	pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(pl, vid, state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), pl);
	kfree(pl);
	return err;
}
4239
/* Prepare the port for enslavement to an Open vSwitch master: switch it
 * to virtual-port (VLAN-unaware) mode, set all VLANs to forwarding, join
 * VLANs 2..4095 and disable learning on VIDs 1..4095.  On failure, the
 * unwind rolls back exactly the VIDs whose learning was already disabled
 * (vid-- steps back from the failing VID), then the VLAN membership,
 * STP state and VP mode.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on VIDs that were actually disabled. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4274
/* Undo mlxsw_sp_port_ovs_join() in reverse order: re-enable learning on
 * VIDs 4095..1, leave VLANs 2..4095, set STP state back to discarding
 * and restore VLAN-aware mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
4288
/* Handle [PRE]CHANGEUPPER netdev events for an mlxsw_sp port @dev.
 * PRECHANGEUPPER validates that the requested topology can be offloaded
 * (vetoes it with -EINVAL otherwise); CHANGEUPPER reflects the already
 * committed change into the device (bridge/LAG/OVS join or leave).
 * @lower_dev is the device the notification chain descended through
 * (may differ from @dev when called for a LAG's member port).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge and OVS uppers are offloadable. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* Uppers stacked on the new upper are only supported for
		 * bridges this driver already offloads.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev)))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLANs on a LAG member must sit on the LAG, not the port. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}
4363
mlxsw_sp_netdevice_port_lower_event(struct net_device * dev,unsigned long event,void * ptr)4364 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4365 unsigned long event, void *ptr)
4366 {
4367 struct netdev_notifier_changelowerstate_info *info;
4368 struct mlxsw_sp_port *mlxsw_sp_port;
4369 int err;
4370
4371 mlxsw_sp_port = netdev_priv(dev);
4372 info = ptr;
4373
4374 switch (event) {
4375 case NETDEV_CHANGELOWERSTATE:
4376 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4377 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4378 info->lower_state_info);
4379 if (err)
4380 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4381 }
4382 break;
4383 }
4384
4385 return 0;
4386 }
4387
mlxsw_sp_netdevice_port_event(struct net_device * lower_dev,struct net_device * port_dev,unsigned long event,void * ptr)4388 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4389 struct net_device *port_dev,
4390 unsigned long event, void *ptr)
4391 {
4392 switch (event) {
4393 case NETDEV_PRECHANGEUPPER:
4394 case NETDEV_CHANGEUPPER:
4395 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4396 event, ptr);
4397 case NETDEV_CHANGELOWERSTATE:
4398 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4399 ptr);
4400 }
4401
4402 return 0;
4403 }
4404
mlxsw_sp_netdevice_lag_event(struct net_device * lag_dev,unsigned long event,void * ptr)4405 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4406 unsigned long event, void *ptr)
4407 {
4408 struct net_device *dev;
4409 struct list_head *iter;
4410 int ret;
4411
4412 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4413 if (mlxsw_sp_port_dev_check(dev)) {
4414 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4415 ptr);
4416 if (ret)
4417 return ret;
4418 }
4419 }
4420
4421 return 0;
4422 }
4423
/* Handle [PRE]CHANGEUPPER for VLAN device @vlan_dev whose real device is
 * (or sits on top of) mlxsw_sp port @dev.  Only enslavement to a bridge
 * is supported; anything else is vetoed.  @vid is the VLAN id of
 * @vlan_dev (unused in this function body — kept for the call chain).
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* NOTE(review): the !netif_is_bridge_master() sub-check is
		 * always false here (non-bridges returned -EINVAL above);
		 * kept for symmetry with the port upper-event handler.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed this topology. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4468
/* Fan an event for a VLAN device on top of a LAG out to every mlxsw_sp
 * member port of that LAG; stop and return the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (!mlxsw_sp_port_dev_check(dev))
			continue;
		err = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
							 event, ptr, vid);
		if (err)
			return err;
	}

	return 0;
}
4490
mlxsw_sp_netdevice_vlan_event(struct net_device * vlan_dev,unsigned long event,void * ptr)4491 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4492 unsigned long event, void *ptr)
4493 {
4494 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4495 u16 vid = vlan_dev_vlan_id(vlan_dev);
4496
4497 if (mlxsw_sp_port_dev_check(real_dev))
4498 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4499 event, ptr, vid);
4500 else if (netif_is_lag_master(real_dev))
4501 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4502 real_dev, event,
4503 ptr, vid);
4504
4505 return 0;
4506 }
4507
mlxsw_sp_is_vrf_event(unsigned long event,void * ptr)4508 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4509 {
4510 struct netdev_notifier_changeupper_info *info = ptr;
4511
4512 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4513 return false;
4514 return netif_is_l3_master(info->upper_dev);
4515 }
4516
/* Top-level netdevice notifier: dispatch by event type first (router
 * port and VRF events take precedence), then by device type (port, LAG
 * master, VLAN device).  The else-if order matters: a VRF CHANGEUPPER
 * must not fall through to the per-device handlers.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4536
/* Notifier block for netdev topology events (uppers, LAG, VLAN, MTU). */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
4540
/* Notifier block for IPv4 address changes on router interfaces. */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};
4545
/* Notifier block for IPv6 address changes on router interfaces. */
static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};
4549
/* Notifier block for netevents (e.g. neighbour updates) used by the
 * router offload code.
 */
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
4553
/* PCI IDs this driver binds to: the Mellanox Spectrum ASIC. */
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};
4558
/* PCI driver shell; probe/remove are filled in by the mlxsw PCI core
 * via mlxsw_pci_driver_register().
 */
static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
4563
/* Module entry point: register the notifier blocks, then the mlxsw core
 * driver, then the PCI driver.  The goto ladder unwinds in reverse order
 * on failure.  NOTE(review): return values of the four notifier
 * registrations are ignored — consider checking them.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
4592
/* Module exit: unregister everything in exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4602
/* Module registration and metadata. */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
4611