1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/device.h>
7 #include <linux/export.h>
8 #include <linux/err.h>
9 #include <linux/if_link.h>
10 #include <linux/netdevice.h>
11 #include <linux/completion.h>
12 #include <linux/skbuff.h>
13 #include <linux/etherdevice.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/gfp.h>
17 #include <linux/random.h>
18 #include <linux/jiffies.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
23 #include <linux/firmware.h>
24 #include <asm/byteorder.h>
25 #include <net/devlink.h>
26 #include <trace/events/devlink.h>
27 
28 #include "core.h"
29 #include "core_env.h"
30 #include "item.h"
31 #include "cmd.h"
32 #include "port.h"
33 #include "trap.h"
34 #include "emad.h"
35 #include "reg.h"
36 #include "resources.h"
37 #include "../mlxfw/mlxfw.h"
38 
39 static LIST_HEAD(mlxsw_core_driver_list);
40 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
41 
42 static const char mlxsw_core_driver_name[] = "mlxsw_core";
43 
44 static struct workqueue_struct *mlxsw_wq;
45 static struct workqueue_struct *mlxsw_owq;
46 
47 struct mlxsw_core_port {
48 	struct devlink_port devlink_port;
49 	void *port_driver_priv;
50 	u8 local_port;
51 };
52 
53 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
54 {
55 	return mlxsw_core_port->port_driver_priv;
56 }
57 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
58 
59 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
60 {
61 	return mlxsw_core_port->port_driver_priv != NULL;
62 }
63 
64 struct mlxsw_core {
65 	struct mlxsw_driver *driver;
66 	const struct mlxsw_bus *bus;
67 	void *bus_priv;
68 	const struct mlxsw_bus_info *bus_info;
69 	struct workqueue_struct *emad_wq;
70 	struct list_head rx_listener_list;
71 	struct list_head event_listener_list;
72 	struct {
73 		atomic64_t tid;
74 		struct list_head trans_list;
75 		spinlock_t trans_list_lock; /* protects trans_list writes */
76 		bool use_emad;
77 		bool enable_string_tlv;
78 	} emad;
79 	struct {
80 		u8 *mapping; /* lag_id+port_index to local_port mapping */
81 	} lag;
82 	struct mlxsw_res res;
83 	struct mlxsw_hwmon *hwmon;
84 	struct mlxsw_thermal *thermal;
85 	struct mlxsw_core_port *ports;
86 	unsigned int max_ports;
87 	bool fw_flash_in_progress;
88 	struct {
89 		struct devlink_health_reporter *fw_fatal;
90 	} health;
91 	struct mlxsw_env *env;
92 	bool is_initialized; /* Denotes if core was already initialized. */
93 	unsigned long driver_priv[];
94 	/* driver_priv must always be the last item */
95 };
96 
97 #define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40
98 
99 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
100 {
101 	/* Switch ports are numbered from 1 to the queried value */
102 	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
103 		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
104 							   MAX_SYSTEM_PORT) + 1;
105 	else
106 		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
107 
108 	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
109 				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
110 	if (!mlxsw_core->ports)
111 		return -ENOMEM;
112 
113 	return 0;
114 }
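/* Editor's note (illustration, not part of the driver): because switch ports
 * are numbered starting at 1, the array is sized to the queried maximum plus
 * one so that a local port number can index mlxsw_core->ports directly.
 * Assuming MAX_SYSTEM_PORT is reported as 64, for example, max_ports becomes
 * 65, valid indices are 1..64, and index 0 is left for the CPU port
 * (MLXSW_PORT_CPU_PORT, used for EMAD transmission below).
 */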
115 
116 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
117 {
118 	kfree(mlxsw_core->ports);
119 }
120 
121 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
122 {
123 	return mlxsw_core->max_ports;
124 }
125 EXPORT_SYMBOL(mlxsw_core_max_ports);
126 
127 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
128 {
129 	return mlxsw_core->driver_priv;
130 }
131 EXPORT_SYMBOL(mlxsw_core_driver_priv);
132 
133 bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
134 {
135 	return mlxsw_core->driver->res_query_enabled;
136 }
137 EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
138 
139 bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
140 {
141 	return mlxsw_core->driver->temp_warn_enabled;
142 }
143 
144 bool
145 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
146 					  const struct mlxsw_fw_rev *req_rev)
147 {
148 	return rev->minor > req_rev->minor ||
149 	       (rev->minor == req_rev->minor &&
150 		rev->subminor >= req_rev->subminor);
151 }
152 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
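/* Editor's note (worked example, not part of the driver): the check above
 * accepts any firmware whose minor.subminor is not older than the required
 * one. For instance, against a required revision of x.2000.1886, a device
 * running x.2008.1310 passes (2008 > 2000), x.2000.1886 passes (equal minor
 * and subminor), and x.2000.1884 fails (equal minor, older subminor). The
 * major number is compared separately by the caller.
 */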
153 
154 struct mlxsw_rx_listener_item {
155 	struct list_head list;
156 	struct mlxsw_rx_listener rxl;
157 	void *priv;
158 	bool enabled;
159 };
160 
161 struct mlxsw_event_listener_item {
162 	struct list_head list;
163 	struct mlxsw_event_listener el;
164 	void *priv;
165 };
166 
167 /******************
168  * EMAD processing
169  ******************/
170 
171 /* emad_eth_hdr_dmac
172  * Destination MAC in EMAD's Ethernet header.
173  * Must be set to 01:02:c9:00:00:01
174  */
175 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
176 
177 /* emad_eth_hdr_smac
178  * Source MAC in EMAD's Ethernet header.
179  * Must be set to 00:02:c9:01:02:03
180  */
181 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
182 
183 /* emad_eth_hdr_ethertype
184  * Ethertype in EMAD's Ethernet header.
185  * Must be set to 0x8932
186  */
187 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
188 
189 /* emad_eth_hdr_mlx_proto
190  * Mellanox protocol.
191  * Must be set to 0x0.
192  */
193 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
194 
195 /* emad_eth_hdr_ver
196  * Mellanox protocol version.
197  * Must be set to 0x0.
198  */
199 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
200 
201 /* emad_op_tlv_type
202  * Type of the TLV.
203  * Must be set to 0x1 (operation TLV).
204  */
205 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
206 
207 /* emad_op_tlv_len
208  * Length of the operation TLV in u32.
209  * Must be set to 0x4.
210  */
211 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
212 
213 /* emad_op_tlv_dr
214  * Direct route bit. Setting to 1 indicates the EMAD is a direct route
215  * EMAD. DR TLV must follow.
216  *
217  * Note: Currently not supported and must not be set.
218  */
219 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
220 
221 /* emad_op_tlv_status
222  * Returned status in case of EMAD response. Must be set to 0 in case
223  * of EMAD request.
224  * 0x0 - success
225  * 0x1 - device is busy. Requester should retry
226  * 0x2 - Mellanox protocol version not supported
227  * 0x3 - unknown TLV
228  * 0x4 - register not supported
229  * 0x5 - operation class not supported
230  * 0x6 - EMAD method not supported
231  * 0x7 - bad parameter (e.g. port out of range)
232  * 0x8 - resource not available
233  * 0x9 - message receipt acknowledgment. Requester should retry
234  * 0x70 - internal error
235  */
236 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
237 
238 /* emad_op_tlv_register_id
239  * Register ID of register within register TLV.
240  */
241 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
242 
243 /* emad_op_tlv_r
244  * Response bit. Setting to 1 indicates a response, otherwise a request.
245  */
246 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
247 
248 /* emad_op_tlv_method
249  * EMAD method type.
250  * 0x1 - query
251  * 0x2 - write
252  * 0x3 - send (currently not supported)
253  * 0x4 - event
254  */
255 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
256 
257 /* emad_op_tlv_class
258  * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
259  */
260 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
261 
262 /* emad_op_tlv_tid
263  * EMAD transaction ID. Used for pairing request and response EMADs.
264  */
265 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
266 
267 /* emad_string_tlv_type
268  * Type of the TLV.
269  * Must be set to 0x2 (string TLV).
270  */
271 MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
272 
273 /* emad_string_tlv_len
274  * Length of the string TLV in u32.
275  */
276 MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
277 
278 #define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
279 
280 /* emad_string_tlv_string
281  * String provided by the device's firmware in case of erroneous register access
282  */
283 MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
284 	       MLXSW_EMAD_STRING_TLV_STRING_LEN);
285 
286 /* emad_reg_tlv_type
287  * Type of the TLV.
288  * Must be set to 0x3 (register TLV).
289  */
290 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
291 
292 /* emad_reg_tlv_len
293  * Length of the register TLV in u32.
294  */
295 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
296 
297 /* emad_end_tlv_type
298  * Type of the TLV.
299  * Must be set to 0x0 (end TLV).
300  */
301 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
302 
303 /* emad_end_tlv_len
304  * Length of the end TLV in u32.
305  * Must be set to 1.
306  */
307 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
308 
309 enum mlxsw_core_reg_access_type {
310 	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
311 	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
312 };
313 
314 static inline const char *
315 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
316 {
317 	switch (type) {
318 	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
319 		return "query";
320 	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
321 		return "write";
322 	}
323 	BUG();
324 }
325 
326 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
327 {
328 	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
329 	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
330 }
331 
332 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
333 				    const struct mlxsw_reg_info *reg,
334 				    char *payload)
335 {
336 	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
337 	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
338 	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
339 }
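/* Editor's note (illustration, not part of the driver): the register TLV
 * length is expressed in 32-bit words and includes the one-word TLV header
 * in front of the payload, hence reg->len / sizeof(u32) + 1. A register with
 * a 16-byte payload, for example, yields a TLV length of 16/4 + 1 = 5 words,
 * and the payload is copied in starting at offset sizeof(u32), right after
 * the header word.
 */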
340 
341 static void mlxsw_emad_pack_string_tlv(char *string_tlv)
342 {
343 	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
344 	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
345 }
346 
347 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
348 				   const struct mlxsw_reg_info *reg,
349 				   enum mlxsw_core_reg_access_type type,
350 				   u64 tid)
351 {
352 	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
353 	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
354 	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
355 	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
356 	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
357 	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
358 	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
359 		mlxsw_emad_op_tlv_method_set(op_tlv,
360 					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
361 	else
362 		mlxsw_emad_op_tlv_method_set(op_tlv,
363 					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
364 	mlxsw_emad_op_tlv_class_set(op_tlv,
365 				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
366 	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
367 }
368 
369 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
370 {
371 	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
372 
373 	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
374 	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
375 	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
376 	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
377 	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
378 
379 	skb_reset_mac_header(skb);
380 
381 	return 0;
382 }
383 
384 static void mlxsw_emad_construct(struct sk_buff *skb,
385 				 const struct mlxsw_reg_info *reg,
386 				 char *payload,
387 				 enum mlxsw_core_reg_access_type type,
388 				 u64 tid, bool enable_string_tlv)
389 {
390 	char *buf;
391 
392 	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
393 	mlxsw_emad_pack_end_tlv(buf);
394 
395 	buf = skb_push(skb, reg->len + sizeof(u32));
396 	mlxsw_emad_pack_reg_tlv(buf, reg, payload);
397 
398 	if (enable_string_tlv) {
399 		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
400 		mlxsw_emad_pack_string_tlv(buf);
401 	}
402 
403 	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
404 	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
405 
406 	mlxsw_emad_construct_eth_hdr(skb);
407 }
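/* Editor's note (illustration, not part of the driver): skb_push() prepends
 * data, so the TLVs above are built back to front. Pushing the end TLV, then
 * the register TLV, then (optionally) the string TLV, then the operation TLV
 * and finally the Ethernet header yields the on-wire order:
 *
 *   Ethernet header | op TLV | [string TLV] | reg TLV | end TLV
 *
 * which matches the offsets computed in mlxsw_emad_tlv_parse() below.
 */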
408 
409 struct mlxsw_emad_tlv_offsets {
410 	u16 op_tlv;
411 	u16 string_tlv;
412 	u16 reg_tlv;
413 };
414 
415 static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
416 {
417 	u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
418 
419 	return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
420 }
421 
422 static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
423 {
424 	struct mlxsw_emad_tlv_offsets *offsets =
425 		(struct mlxsw_emad_tlv_offsets *) skb->cb;
426 
427 	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
428 	offsets->string_tlv = 0;
429 	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
430 			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
431 
432 	/* If string TLV is present, it must come after the operation TLV. */
433 	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
434 		offsets->string_tlv = offsets->reg_tlv;
435 		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
436 	}
437 }
438 
439 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
440 {
441 	struct mlxsw_emad_tlv_offsets *offsets =
442 		(struct mlxsw_emad_tlv_offsets *) skb->cb;
443 
444 	return ((char *) (skb->data + offsets->op_tlv));
445 }
446 
447 static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
448 {
449 	struct mlxsw_emad_tlv_offsets *offsets =
450 		(struct mlxsw_emad_tlv_offsets *) skb->cb;
451 
452 	if (!offsets->string_tlv)
453 		return NULL;
454 
455 	return ((char *) (skb->data + offsets->string_tlv));
456 }
457 
458 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
459 {
460 	struct mlxsw_emad_tlv_offsets *offsets =
461 		(struct mlxsw_emad_tlv_offsets *) skb->cb;
462 
463 	return ((char *) (skb->data + offsets->reg_tlv));
464 }
465 
466 static char *mlxsw_emad_reg_payload(const char *reg_tlv)
467 {
468 	return ((char *) (reg_tlv + sizeof(u32)));
469 }
470 
471 static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
472 {
473 	return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
474 }
475 
476 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
477 {
478 	char *op_tlv;
479 
480 	op_tlv = mlxsw_emad_op_tlv(skb);
481 	return mlxsw_emad_op_tlv_tid_get(op_tlv);
482 }
483 
484 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
485 {
486 	char *op_tlv;
487 
488 	op_tlv = mlxsw_emad_op_tlv(skb);
489 	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
490 }
491 
492 static int mlxsw_emad_process_status(char *op_tlv,
493 				     enum mlxsw_emad_op_tlv_status *p_status)
494 {
495 	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
496 
497 	switch (*p_status) {
498 	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
499 		return 0;
500 	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
501 	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
502 		return -EAGAIN;
503 	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
504 	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
505 	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
506 	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
507 	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
508 	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
509 	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
510 	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
511 	default:
512 		return -EIO;
513 	}
514 }
515 
516 static int
517 mlxsw_emad_process_status_skb(struct sk_buff *skb,
518 			      enum mlxsw_emad_op_tlv_status *p_status)
519 {
520 	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
521 }
522 
523 struct mlxsw_reg_trans {
524 	struct list_head list;
525 	struct list_head bulk_list;
526 	struct mlxsw_core *core;
527 	struct sk_buff *tx_skb;
528 	struct mlxsw_tx_info tx_info;
529 	struct delayed_work timeout_dw;
530 	unsigned int retries;
531 	u64 tid;
532 	struct completion completion;
533 	atomic_t active;
534 	mlxsw_reg_trans_cb_t *cb;
535 	unsigned long cb_priv;
536 	const struct mlxsw_reg_info *reg;
537 	enum mlxsw_core_reg_access_type type;
538 	int err;
539 	char *emad_err_string;
540 	enum mlxsw_emad_op_tlv_status emad_status;
541 	struct rcu_head rcu;
542 };
543 
544 static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
545 					  struct mlxsw_reg_trans *trans)
546 {
547 	char *string_tlv;
548 	char *string;
549 
550 	string_tlv = mlxsw_emad_string_tlv(skb);
551 	if (!string_tlv)
552 		return;
553 
554 	trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
555 					 GFP_ATOMIC);
556 	if (!trans->emad_err_string)
557 		return;
558 
559 	string = mlxsw_emad_string_tlv_string_data(string_tlv);
560 	strlcpy(trans->emad_err_string, string,
561 		MLXSW_EMAD_STRING_TLV_STRING_LEN);
562 }
563 
564 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
565 #define MLXSW_EMAD_TIMEOUT_MS			200
566 
567 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
568 {
569 	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
570 
571 	if (trans->core->fw_flash_in_progress)
572 		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
573 
574 	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
575 			   timeout << trans->retries);
576 }
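/* Editor's note (worked example, not part of the driver): the timeout is
 * shifted left by the retry count, giving a simple exponential backoff.
 * With the normal 200 ms base this means 200 ms for the first attempt, then
 * 400 ms, 800 ms and so on; during a firmware flash the 3000 ms base is
 * scaled the same way.
 */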
577 
578 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
579 			       struct mlxsw_reg_trans *trans)
580 {
581 	struct sk_buff *skb;
582 	int err;
583 
584 	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
585 	if (!skb)
586 		return -ENOMEM;
587 
588 	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
589 			    skb->data + mlxsw_core->driver->txhdr_len,
590 			    skb->len - mlxsw_core->driver->txhdr_len);
591 
592 	atomic_set(&trans->active, 1);
593 	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
594 	if (err) {
595 		dev_kfree_skb(skb);
596 		return err;
597 	}
598 	mlxsw_emad_trans_timeout_schedule(trans);
599 	return 0;
600 }
601 
602 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
603 {
604 	struct mlxsw_core *mlxsw_core = trans->core;
605 
606 	dev_kfree_skb(trans->tx_skb);
607 	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
608 	list_del_rcu(&trans->list);
609 	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
610 	trans->err = err;
611 	complete(&trans->completion);
612 }
613 
614 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
615 				      struct mlxsw_reg_trans *trans)
616 {
617 	int err;
618 
619 	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
620 		trans->retries++;
621 		err = mlxsw_emad_transmit(trans->core, trans);
622 		if (err == 0)
623 			return;
624 
625 		if (!atomic_dec_and_test(&trans->active))
626 			return;
627 	} else {
628 		err = -EIO;
629 	}
630 	mlxsw_emad_trans_finish(trans, err);
631 }
632 
633 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
634 {
635 	struct mlxsw_reg_trans *trans = container_of(work,
636 						     struct mlxsw_reg_trans,
637 						     timeout_dw.work);
638 
639 	if (!atomic_dec_and_test(&trans->active))
640 		return;
641 
642 	mlxsw_emad_transmit_retry(trans->core, trans);
643 }
644 
645 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
646 					struct mlxsw_reg_trans *trans,
647 					struct sk_buff *skb)
648 {
649 	int err;
650 
651 	if (!atomic_dec_and_test(&trans->active))
652 		return;
653 
654 	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
655 	if (err == -EAGAIN) {
656 		mlxsw_emad_transmit_retry(mlxsw_core, trans);
657 	} else {
658 		if (err == 0) {
659 			char *reg_tlv = mlxsw_emad_reg_tlv(skb);
660 
661 			if (trans->cb)
662 				trans->cb(mlxsw_core,
663 					  mlxsw_emad_reg_payload(reg_tlv),
664 					  trans->reg->len, trans->cb_priv);
665 		} else {
666 			mlxsw_emad_process_string_tlv(skb, trans);
667 		}
668 		mlxsw_emad_trans_finish(trans, err);
669 	}
670 }
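/* Editor's note (not part of the driver): trans->active is set to 1 when a
 * copy of the EMAD is transmitted, and both the response path above and the
 * timeout work below decrement it with atomic_dec_and_test(). Only the path
 * that observes the 1 -> 0 transition proceeds, so a late response and a
 * timeout cannot both complete or retry the same transaction.
 */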
671 
672 /* called with rcu read lock held */
673 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
674 					void *priv)
675 {
676 	struct mlxsw_core *mlxsw_core = priv;
677 	struct mlxsw_reg_trans *trans;
678 
679 	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
680 			    skb->data, skb->len);
681 
682 	mlxsw_emad_tlv_parse(skb);
683 
684 	if (!mlxsw_emad_is_resp(skb))
685 		goto free_skb;
686 
687 	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
688 		if (mlxsw_emad_get_tid(skb) == trans->tid) {
689 			mlxsw_emad_process_response(mlxsw_core, trans, skb);
690 			break;
691 		}
692 	}
693 
694 free_skb:
695 	dev_kfree_skb(skb);
696 }
697 
698 static const struct mlxsw_listener mlxsw_emad_rx_listener =
699 	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
700 		  EMAD, DISCARD);
701 
702 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
703 {
704 	struct workqueue_struct *emad_wq;
705 	u64 tid;
706 	int err;
707 
708 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
709 		return 0;
710 
711 	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
712 	if (!emad_wq)
713 		return -ENOMEM;
714 	mlxsw_core->emad_wq = emad_wq;
715 
716 	/* Set the upper 32 bits of the transaction ID field to a random
717 	 * number. This allows us to discard EMADs addressed to other
718 	 * devices.
719 	 */
720 	get_random_bytes(&tid, 4);
721 	tid <<= 32;
722 	atomic64_set(&mlxsw_core->emad.tid, tid);
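	/* Editor's note (illustration, not part of the driver): only four
	 * random bytes are generated and then shifted into the upper half,
	 * so the initial TID looks like 0xRRRRRRRR00000000, leaving the
	 * lower 32 bits free to count transactions.
	 */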
723 
724 	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
725 	spin_lock_init(&mlxsw_core->emad.trans_list_lock);
726 
727 	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
728 				       mlxsw_core);
729 	if (err)
730 		goto err_trap_register;
731 
732 	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
733 	if (err)
734 		goto err_emad_trap_set;
735 	mlxsw_core->emad.use_emad = true;
736 
737 	return 0;
738 
739 err_emad_trap_set:
740 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
741 				   mlxsw_core);
742 err_trap_register:
743 	destroy_workqueue(mlxsw_core->emad_wq);
744 	return err;
745 }
746 
747 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
748 {
749 
750 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
751 		return;
752 
753 	mlxsw_core->emad.use_emad = false;
754 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
755 				   mlxsw_core);
756 	destroy_workqueue(mlxsw_core->emad_wq);
757 }
758 
759 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
760 					u16 reg_len, bool enable_string_tlv)
761 {
762 	struct sk_buff *skb;
763 	u16 emad_len;
764 
765 	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
766 		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
767 		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
768 	if (enable_string_tlv)
769 		emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
770 	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
771 		return NULL;
772 
773 	skb = netdev_alloc_skb(NULL, emad_len);
774 	if (!skb)
775 		return NULL;
776 	memset(skb->data, 0, emad_len);
777 	skb_reserve(skb, emad_len);
778 
779 	return skb;
780 }
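/* Editor's note (illustration, not part of the driver): emad_len adds up the
 * Ethernet header, the operation and end TLVs, one header word plus the
 * payload for the register TLV, the optional string TLV and the bus-specific
 * TX header. skb_reserve() then moves the data pointer to the end of that
 * space, so the skb starts out empty and mlxsw_emad_construct() fills it
 * from the back using skb_push().
 */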
781 
782 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
783 				 const struct mlxsw_reg_info *reg,
784 				 char *payload,
785 				 enum mlxsw_core_reg_access_type type,
786 				 struct mlxsw_reg_trans *trans,
787 				 struct list_head *bulk_list,
788 				 mlxsw_reg_trans_cb_t *cb,
789 				 unsigned long cb_priv, u64 tid)
790 {
791 	bool enable_string_tlv;
792 	struct sk_buff *skb;
793 	int err;
794 
795 	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
796 		tid, reg->id, mlxsw_reg_id_str(reg->id),
797 		mlxsw_core_reg_access_type_str(type));
798 
799 	/* Since this can be changed during emad_reg_access, read it once and
800 	 * use the same value throughout.
801 	 */
802 	enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
803 
804 	skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
805 	if (!skb)
806 		return -ENOMEM;
807 
808 	list_add_tail(&trans->bulk_list, bulk_list);
809 	trans->core = mlxsw_core;
810 	trans->tx_skb = skb;
811 	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
812 	trans->tx_info.is_emad = true;
813 	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
814 	trans->tid = tid;
815 	init_completion(&trans->completion);
816 	trans->cb = cb;
817 	trans->cb_priv = cb_priv;
818 	trans->reg = reg;
819 	trans->type = type;
820 
821 	mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
822 			     enable_string_tlv);
823 	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
824 
825 	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
826 	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
827 	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
828 	err = mlxsw_emad_transmit(mlxsw_core, trans);
829 	if (err)
830 		goto err_out;
831 	return 0;
832 
833 err_out:
834 	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
835 	list_del_rcu(&trans->list);
836 	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
837 	list_del(&trans->bulk_list);
838 	dev_kfree_skb(trans->tx_skb);
839 	return err;
840 }
841 
842 /*****************
843  * Core functions
844  *****************/
845 
846 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
847 {
848 	spin_lock(&mlxsw_core_driver_list_lock);
849 	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
850 	spin_unlock(&mlxsw_core_driver_list_lock);
851 	return 0;
852 }
853 EXPORT_SYMBOL(mlxsw_core_driver_register);
854 
855 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
856 {
857 	spin_lock(&mlxsw_core_driver_list_lock);
858 	list_del(&mlxsw_driver->list);
859 	spin_unlock(&mlxsw_core_driver_list_lock);
860 }
861 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
862 
863 static struct mlxsw_driver *__driver_find(const char *kind)
864 {
865 	struct mlxsw_driver *mlxsw_driver;
866 
867 	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
868 		if (strcmp(mlxsw_driver->kind, kind) == 0)
869 			return mlxsw_driver;
870 	}
871 	return NULL;
872 }
873 
874 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
875 {
876 	struct mlxsw_driver *mlxsw_driver;
877 
878 	spin_lock(&mlxsw_core_driver_list_lock);
879 	mlxsw_driver = __driver_find(kind);
880 	spin_unlock(&mlxsw_core_driver_list_lock);
881 	return mlxsw_driver;
882 }
883 
884 struct mlxsw_core_fw_info {
885 	struct mlxfw_dev mlxfw_dev;
886 	struct mlxsw_core *mlxsw_core;
887 };
888 
889 static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
890 					 u16 component_index, u32 *p_max_size,
891 					 u8 *p_align_bits, u16 *p_max_write_size)
892 {
893 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
894 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
895 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
896 	char mcqi_pl[MLXSW_REG_MCQI_LEN];
897 	int err;
898 
899 	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
900 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
901 	if (err)
902 		return err;
903 	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
904 
905 	*p_align_bits = max_t(u8, *p_align_bits, 2);
906 	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
907 	return 0;
908 }
909 
910 static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
911 {
912 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
913 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
914 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
915 	char mcc_pl[MLXSW_REG_MCC_LEN];
916 	u8 control_state;
917 	int err;
918 
919 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
920 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
921 	if (err)
922 		return err;
923 
924 	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
925 	if (control_state != MLXFW_FSM_STATE_IDLE)
926 		return -EBUSY;
927 
928 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
929 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
930 }
931 
932 static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
933 					      u16 component_index, u32 component_size)
934 {
935 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
936 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
937 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
938 	char mcc_pl[MLXSW_REG_MCC_LEN];
939 
940 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
941 			   component_index, fwhandle, component_size);
942 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
943 }
944 
945 static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
946 					    u8 *data, u16 size, u32 offset)
947 {
948 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
949 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
950 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
951 	char mcda_pl[MLXSW_REG_MCDA_LEN];
952 
953 	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
954 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
955 }
956 
957 static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
958 					      u16 component_index)
959 {
960 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
961 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
962 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
963 	char mcc_pl[MLXSW_REG_MCC_LEN];
964 
965 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
966 			   component_index, fwhandle, 0);
967 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
968 }
969 
970 static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
971 {
972 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
973 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
974 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
975 	char mcc_pl[MLXSW_REG_MCC_LEN];
976 
977 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
978 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
979 }
980 
981 static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
982 					 enum mlxfw_fsm_state *fsm_state,
983 					 enum mlxfw_fsm_state_err *fsm_state_err)
984 {
985 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
986 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
987 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
988 	char mcc_pl[MLXSW_REG_MCC_LEN];
989 	u8 control_state;
990 	u8 error_code;
991 	int err;
992 
993 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
994 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
995 	if (err)
996 		return err;
997 
998 	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
999 	*fsm_state = control_state;
1000 	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
1001 	return 0;
1002 }
1003 
1004 static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1005 {
1006 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1007 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1008 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1009 	char mcc_pl[MLXSW_REG_MCC_LEN];
1010 
1011 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
1012 	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1013 }
1014 
1015 static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1016 {
1017 	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1018 		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1019 	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1020 	char mcc_pl[MLXSW_REG_MCC_LEN];
1021 
1022 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
1023 	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1024 }
1025 
1026 static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
1027 	.component_query	= mlxsw_core_fw_component_query,
1028 	.fsm_lock		= mlxsw_core_fw_fsm_lock,
1029 	.fsm_component_update	= mlxsw_core_fw_fsm_component_update,
1030 	.fsm_block_download	= mlxsw_core_fw_fsm_block_download,
1031 	.fsm_component_verify	= mlxsw_core_fw_fsm_component_verify,
1032 	.fsm_activate		= mlxsw_core_fw_fsm_activate,
1033 	.fsm_query_state	= mlxsw_core_fw_fsm_query_state,
1034 	.fsm_cancel		= mlxsw_core_fw_fsm_cancel,
1035 	.fsm_release		= mlxsw_core_fw_fsm_release,
1036 };
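/* Editor's note (sketch of the expected call order, not a definitive
 * description): mlxfw_firmware_flash() is assumed to drive these callbacks
 * roughly as follows: lock the FSM handle, then for each firmware component
 * run component_update, download the data in blocks and verify it, then
 * activate the new image and release the handle, with cancel used on error.
 * All of them map onto the MCQI/MCC/MCDA registers packed above.
 */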
1037 
1038 static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
1039 			       struct netlink_ext_ack *extack)
1040 {
1041 	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
1042 		.mlxfw_dev = {
1043 			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
1044 			.psid = mlxsw_core->bus_info->psid,
1045 			.psid_size = strlen(mlxsw_core->bus_info->psid),
1046 			.devlink = priv_to_devlink(mlxsw_core),
1047 		},
1048 		.mlxsw_core = mlxsw_core
1049 	};
1050 	int err;
1051 
1052 	mlxsw_core->fw_flash_in_progress = true;
1053 	err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
1054 	mlxsw_core->fw_flash_in_progress = false;
1055 
1056 	return err;
1057 }
1058 
1059 static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
1060 				      const struct mlxsw_bus_info *mlxsw_bus_info,
1061 				      const struct mlxsw_fw_rev *req_rev,
1062 				      const char *filename)
1063 {
1064 	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
1065 	union devlink_param_value value;
1066 	const struct firmware *firmware;
1067 	int err;
1068 
1069 	/* Don't check if driver does not require it */
1070 	if (!req_rev || !filename)
1071 		return 0;
1072 
1073 	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
1074 	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
1075 						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
1076 						 &value);
1077 	if (err)
1078 		return err;
1079 	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
1080 		return 0;
1081 
1082 	/* Validate driver & FW are compatible */
1083 	if (rev->major != req_rev->major) {
1084 		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
1085 		     rev->major, req_rev->major);
1086 		return -EINVAL;
1087 	}
1088 	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
1089 		return 0;
1090 
1091 	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
1092 		rev->major, rev->minor, rev->subminor, req_rev->major,
1093 		req_rev->minor, req_rev->subminor);
1094 	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
1095 
1096 	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
1097 	if (err) {
1098 		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
1099 		return err;
1100 	}
1101 
1102 	err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
1103 	release_firmware(firmware);
1104 	if (err)
1105 		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
1106 
1107 	/* On FW flash success, tell the caller FW reset is needed
1108 	 * if current FW supports it.
1109 	 */
1110 	if (rev->minor >= req_rev->can_reset_minor)
1111 		return err ? err : -EAGAIN;
1112 	else
1113 		return 0;
1114 }
1115 
1116 static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
1117 				      struct devlink_flash_update_params *params,
1118 				      struct netlink_ext_ack *extack)
1119 {
1120 	const struct firmware *firmware;
1121 	int err;
1122 
1123 	err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev);
1124 	if (err)
1125 		return err;
1126 	err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
1127 	release_firmware(firmware);
1128 
1129 	return err;
1130 }
1131 
1132 static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
1133 							    union devlink_param_value val,
1134 							    struct netlink_ext_ack *extack)
1135 {
1136 	if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
1137 	    val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
1138 		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
1139 		return -EINVAL;
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
1146 	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
1147 			      mlxsw_core_devlink_param_fw_load_policy_validate),
1148 };
1149 
1150 static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
1151 {
1152 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
1153 	union devlink_param_value value;
1154 	int err;
1155 
1156 	err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
1157 				      ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1158 	if (err)
1159 		return err;
1160 
1161 	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
1162 	devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
1163 	return 0;
1164 }
1165 
1166 static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
1167 {
1168 	devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
1169 				  ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1170 }
1171 
1172 static int mlxsw_devlink_port_split(struct devlink *devlink,
1173 				    unsigned int port_index,
1174 				    unsigned int count,
1175 				    struct netlink_ext_ack *extack)
1176 {
1177 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1178 
1179 	if (port_index >= mlxsw_core->max_ports) {
1180 		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
1181 		return -EINVAL;
1182 	}
1183 	if (!mlxsw_core->driver->port_split)
1184 		return -EOPNOTSUPP;
1185 	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
1186 					      extack);
1187 }
1188 
1189 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
1190 				      unsigned int port_index,
1191 				      struct netlink_ext_ack *extack)
1192 {
1193 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1194 
1195 	if (port_index >= mlxsw_core->max_ports) {
1196 		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
1197 		return -EINVAL;
1198 	}
1199 	if (!mlxsw_core->driver->port_unsplit)
1200 		return -EOPNOTSUPP;
1201 	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
1202 						extack);
1203 }
1204 
1205 static int
1206 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
1207 			  unsigned int sb_index, u16 pool_index,
1208 			  struct devlink_sb_pool_info *pool_info)
1209 {
1210 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1211 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1212 
1213 	if (!mlxsw_driver->sb_pool_get)
1214 		return -EOPNOTSUPP;
1215 	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
1216 					 pool_index, pool_info);
1217 }
1218 
1219 static int
1220 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
1221 			  unsigned int sb_index, u16 pool_index, u32 size,
1222 			  enum devlink_sb_threshold_type threshold_type,
1223 			  struct netlink_ext_ack *extack)
1224 {
1225 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1226 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1227 
1228 	if (!mlxsw_driver->sb_pool_set)
1229 		return -EOPNOTSUPP;
1230 	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
1231 					 pool_index, size, threshold_type,
1232 					 extack);
1233 }
1234 
1235 static void *__dl_port(struct devlink_port *devlink_port)
1236 {
1237 	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
1238 }
1239 
1240 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
1241 				       enum devlink_port_type port_type)
1242 {
1243 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1244 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1245 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1246 
1247 	if (!mlxsw_driver->port_type_set)
1248 		return -EOPNOTSUPP;
1249 
1250 	return mlxsw_driver->port_type_set(mlxsw_core,
1251 					   mlxsw_core_port->local_port,
1252 					   port_type);
1253 }
1254 
1255 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
1256 					  unsigned int sb_index, u16 pool_index,
1257 					  u32 *p_threshold)
1258 {
1259 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1260 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1261 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1262 
1263 	if (!mlxsw_driver->sb_port_pool_get ||
1264 	    !mlxsw_core_port_check(mlxsw_core_port))
1265 		return -EOPNOTSUPP;
1266 	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
1267 					      pool_index, p_threshold);
1268 }
1269 
1270 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
1271 					  unsigned int sb_index, u16 pool_index,
1272 					  u32 threshold,
1273 					  struct netlink_ext_ack *extack)
1274 {
1275 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1276 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1277 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1278 
1279 	if (!mlxsw_driver->sb_port_pool_set ||
1280 	    !mlxsw_core_port_check(mlxsw_core_port))
1281 		return -EOPNOTSUPP;
1282 	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
1283 					      pool_index, threshold, extack);
1284 }
1285 
1286 static int
1287 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
1288 				  unsigned int sb_index, u16 tc_index,
1289 				  enum devlink_sb_pool_type pool_type,
1290 				  u16 *p_pool_index, u32 *p_threshold)
1291 {
1292 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1293 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1294 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1295 
1296 	if (!mlxsw_driver->sb_tc_pool_bind_get ||
1297 	    !mlxsw_core_port_check(mlxsw_core_port))
1298 		return -EOPNOTSUPP;
1299 	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
1300 						 tc_index, pool_type,
1301 						 p_pool_index, p_threshold);
1302 }
1303 
1304 static int
1305 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
1306 				  unsigned int sb_index, u16 tc_index,
1307 				  enum devlink_sb_pool_type pool_type,
1308 				  u16 pool_index, u32 threshold,
1309 				  struct netlink_ext_ack *extack)
1310 {
1311 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1312 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1313 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1314 
1315 	if (!mlxsw_driver->sb_tc_pool_bind_set ||
1316 	    !mlxsw_core_port_check(mlxsw_core_port))
1317 		return -EOPNOTSUPP;
1318 	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
1319 						 tc_index, pool_type,
1320 						 pool_index, threshold, extack);
1321 }
1322 
1323 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
1324 					 unsigned int sb_index)
1325 {
1326 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1327 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1328 
1329 	if (!mlxsw_driver->sb_occ_snapshot)
1330 		return -EOPNOTSUPP;
1331 	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
1332 }
1333 
1334 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1335 					  unsigned int sb_index)
1336 {
1337 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1338 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1339 
1340 	if (!mlxsw_driver->sb_occ_max_clear)
1341 		return -EOPNOTSUPP;
1342 	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1343 }
1344 
1345 static int
1346 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1347 				   unsigned int sb_index, u16 pool_index,
1348 				   u32 *p_cur, u32 *p_max)
1349 {
1350 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1351 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1352 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1353 
1354 	if (!mlxsw_driver->sb_occ_port_pool_get ||
1355 	    !mlxsw_core_port_check(mlxsw_core_port))
1356 		return -EOPNOTSUPP;
1357 	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1358 						  pool_index, p_cur, p_max);
1359 }
1360 
1361 static int
1362 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1363 				      unsigned int sb_index, u16 tc_index,
1364 				      enum devlink_sb_pool_type pool_type,
1365 				      u32 *p_cur, u32 *p_max)
1366 {
1367 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1368 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1369 	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1370 
1371 	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
1372 	    !mlxsw_core_port_check(mlxsw_core_port))
1373 		return -EOPNOTSUPP;
1374 	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1375 						     sb_index, tc_index,
1376 						     pool_type, p_cur, p_max);
1377 }
1378 
1379 static int
1380 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1381 		       struct netlink_ext_ack *extack)
1382 {
1383 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1384 	char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
1385 	u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
1386 	char mgir_pl[MLXSW_REG_MGIR_LEN];
1387 	char buf[32];
1388 	int err;
1389 
1390 	err = devlink_info_driver_name_put(req,
1391 					   mlxsw_core->bus_info->device_kind);
1392 	if (err)
1393 		return err;
1394 
1395 	mlxsw_reg_mgir_pack(mgir_pl);
1396 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
1397 	if (err)
1398 		return err;
1399 	mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
1400 			      &fw_minor, &fw_sub_minor);
1401 
1402 	sprintf(buf, "%X", hw_rev);
1403 	err = devlink_info_version_fixed_put(req, "hw.revision", buf);
1404 	if (err)
1405 		return err;
1406 
1407 	err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
1408 	if (err)
1409 		return err;
1410 
1411 	sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
1412 	err = devlink_info_version_running_put(req, "fw.version", buf);
1413 	if (err)
1414 		return err;
1415 
1416 	return 0;
1417 }
1418 
1419 static int
1420 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
1421 					  bool netns_change, enum devlink_reload_action action,
1422 					  enum devlink_reload_limit limit,
1423 					  struct netlink_ext_ack *extack)
1424 {
1425 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1426 
1427 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
1428 		return -EOPNOTSUPP;
1429 
1430 	mlxsw_core_bus_device_unregister(mlxsw_core, true);
1431 	return 0;
1432 }
1433 
1434 static int
1435 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
1436 					enum devlink_reload_limit limit, u32 *actions_performed,
1437 					struct netlink_ext_ack *extack)
1438 {
1439 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1440 
1441 	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1442 			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
1443 	return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
1444 					      mlxsw_core->bus,
1445 					      mlxsw_core->bus_priv, true,
1446 					      devlink, extack);
1447 }
1448 
1449 static int mlxsw_devlink_flash_update(struct devlink *devlink,
1450 				      struct devlink_flash_update_params *params,
1451 				      struct netlink_ext_ack *extack)
1452 {
1453 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1454 
1455 	return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
1456 }
1457 
1458 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1459 				   const struct devlink_trap *trap,
1460 				   void *trap_ctx)
1461 {
1462 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1463 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1464 
1465 	if (!mlxsw_driver->trap_init)
1466 		return -EOPNOTSUPP;
1467 	return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1468 }
1469 
1470 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1471 				    const struct devlink_trap *trap,
1472 				    void *trap_ctx)
1473 {
1474 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1475 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1476 
1477 	if (!mlxsw_driver->trap_fini)
1478 		return;
1479 	mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1480 }
1481 
1482 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1483 					 const struct devlink_trap *trap,
1484 					 enum devlink_trap_action action,
1485 					 struct netlink_ext_ack *extack)
1486 {
1487 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1488 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1489 
1490 	if (!mlxsw_driver->trap_action_set)
1491 		return -EOPNOTSUPP;
1492 	return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
1493 }
1494 
1495 static int
1496 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1497 			      const struct devlink_trap_group *group)
1498 {
1499 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1500 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1501 
1502 	if (!mlxsw_driver->trap_group_init)
1503 		return -EOPNOTSUPP;
1504 	return mlxsw_driver->trap_group_init(mlxsw_core, group);
1505 }
1506 
1507 static int
1508 mlxsw_devlink_trap_group_set(struct devlink *devlink,
1509 			     const struct devlink_trap_group *group,
1510 			     const struct devlink_trap_policer *policer,
1511 			     struct netlink_ext_ack *extack)
1512 {
1513 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1514 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1515 
1516 	if (!mlxsw_driver->trap_group_set)
1517 		return -EOPNOTSUPP;
1518 	return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
1519 }
1520 
1521 static int
1522 mlxsw_devlink_trap_policer_init(struct devlink *devlink,
1523 				const struct devlink_trap_policer *policer)
1524 {
1525 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1526 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1527 
1528 	if (!mlxsw_driver->trap_policer_init)
1529 		return -EOPNOTSUPP;
1530 	return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
1531 }
1532 
1533 static void
1534 mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
1535 				const struct devlink_trap_policer *policer)
1536 {
1537 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1538 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1539 
1540 	if (!mlxsw_driver->trap_policer_fini)
1541 		return;
1542 	mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
1543 }
1544 
1545 static int
1546 mlxsw_devlink_trap_policer_set(struct devlink *devlink,
1547 			       const struct devlink_trap_policer *policer,
1548 			       u64 rate, u64 burst,
1549 			       struct netlink_ext_ack *extack)
1550 {
1551 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1552 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1553 
1554 	if (!mlxsw_driver->trap_policer_set)
1555 		return -EOPNOTSUPP;
1556 	return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
1557 					      extack);
1558 }
1559 
1560 static int
1561 mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
1562 				       const struct devlink_trap_policer *policer,
1563 				       u64 *p_drops)
1564 {
1565 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1566 	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1567 
1568 	if (!mlxsw_driver->trap_policer_counter_get)
1569 		return -EOPNOTSUPP;
1570 	return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
1571 						      p_drops);
1572 }
1573 
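/* devlink ops table: the trap, trap group and policer callbacks above simply
 * dispatch into the per-ASIC driver (mlxsw_core->driver) when it implements
 * the corresponding hook and return -EOPNOTSUPP otherwise.
 */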
1574 static const struct devlink_ops mlxsw_devlink_ops = {
1575 	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1576 				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
1577 	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
1578 	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
1579 	.port_type_set			= mlxsw_devlink_port_type_set,
1580 	.port_split			= mlxsw_devlink_port_split,
1581 	.port_unsplit			= mlxsw_devlink_port_unsplit,
1582 	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
1583 	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
1584 	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
1585 	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
1586 	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
1587 	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
1588 	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
1589 	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
1590 	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
1591 	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
1592 	.info_get			= mlxsw_devlink_info_get,
1593 	.flash_update			= mlxsw_devlink_flash_update,
1594 	.trap_init			= mlxsw_devlink_trap_init,
1595 	.trap_fini			= mlxsw_devlink_trap_fini,
1596 	.trap_action_set		= mlxsw_devlink_trap_action_set,
1597 	.trap_group_init		= mlxsw_devlink_trap_group_init,
1598 	.trap_group_set			= mlxsw_devlink_trap_group_set,
1599 	.trap_policer_init		= mlxsw_devlink_trap_policer_init,
1600 	.trap_policer_fini		= mlxsw_devlink_trap_policer_fini,
1601 	.trap_policer_set		= mlxsw_devlink_trap_policer_set,
1602 	.trap_policer_counter_get	= mlxsw_devlink_trap_policer_counter_get,
1603 };
1604 
1605 static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
1606 {
1607 	int err;
1608 
1609 	err = mlxsw_core_fw_params_register(mlxsw_core);
1610 	if (err)
1611 		return err;
1612 
1613 	if (mlxsw_core->driver->params_register) {
1614 		err = mlxsw_core->driver->params_register(mlxsw_core);
1615 		if (err)
1616 			goto err_params_register;
1617 	}
1618 	return 0;
1619 
1620 err_params_register:
1621 	mlxsw_core_fw_params_unregister(mlxsw_core);
1622 	return err;
1623 }
1624 
1625 static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
1626 {
1627 	mlxsw_core_fw_params_unregister(mlxsw_core);
1628 	if (mlxsw_core->driver->params_register)
1629 		mlxsw_core->driver->params_unregister(mlxsw_core);
1630 }
1631 
1632 struct mlxsw_core_health_event {
1633 	struct mlxsw_core *mlxsw_core;
1634 	char mfde_pl[MLXSW_REG_MFDE_LEN];
1635 	struct work_struct work;
1636 };
1637 
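/* FW fatal (MFDE) events arrive via the trap receive path, so
 * mlxsw_core_health_listener_func() only copies the register payload into a
 * GFP_ATOMIC-allocated event and defers the devlink health report to process
 * context through mlxsw_core_schedule_work().
 */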
1638 static void mlxsw_core_health_event_work(struct work_struct *work)
1639 {
1640 	struct mlxsw_core_health_event *event;
1641 	struct mlxsw_core *mlxsw_core;
1642 
1643 	event = container_of(work, struct mlxsw_core_health_event, work);
1644 	mlxsw_core = event->mlxsw_core;
1645 	devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
1646 			      event->mfde_pl);
1647 	kfree(event);
1648 }
1649 
1650 static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
1651 					    char *mfde_pl, void *priv)
1652 {
1653 	struct mlxsw_core_health_event *event;
1654 	struct mlxsw_core *mlxsw_core = priv;
1655 
1656 	event = kmalloc(sizeof(*event), GFP_ATOMIC);
1657 	if (!event)
1658 		return;
1659 	event->mlxsw_core = mlxsw_core;
1660 	memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
1661 	INIT_WORK(&event->work, mlxsw_core_health_event_work);
1662 	mlxsw_core_schedule_work(&event->work);
1663 }
1664 
1665 static const struct mlxsw_listener mlxsw_core_health_listener =
1666 	MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
1667 
1668 static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
1669 					   struct devlink_fmsg *fmsg, void *priv_ctx,
1670 					   struct netlink_ext_ack *extack)
1671 {
1672 	char *mfde_pl = priv_ctx;
1673 	char *val_str;
1674 	u8 event_id;
1675 	u32 val;
1676 	int err;
1677 
1678 	if (!priv_ctx)
1679 		/* User-triggered dumps are not possible */
1680 		return -EOPNOTSUPP;
1681 
1682 	val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
1683 	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
1684 	if (err)
1685 		return err;
1686 	err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
1687 	if (err)
1688 		return err;
1689 
1690 	event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
1691 	err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
1692 	if (err)
1693 		return err;
1694 	switch (event_id) {
1695 	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1696 		val_str = "CR space timeout";
1697 		break;
1698 	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1699 		val_str = "KVD insertion machine stopped";
1700 		break;
1701 	default:
1702 		val_str = NULL;
1703 	}
1704 	if (val_str) {
1705 		err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
1706 		if (err)
1707 			return err;
1708 	}
1709 	err = devlink_fmsg_arr_pair_nest_end(fmsg);
1710 	if (err)
1711 		return err;
1712 
1713 	val = mlxsw_reg_mfde_method_get(mfde_pl);
1714 	switch (val) {
1715 	case MLXSW_REG_MFDE_METHOD_QUERY:
1716 		val_str = "query";
1717 		break;
1718 	case MLXSW_REG_MFDE_METHOD_WRITE:
1719 		val_str = "write";
1720 		break;
1721 	default:
1722 		val_str = NULL;
1723 	}
1724 	if (val_str) {
1725 		err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
1726 		if (err)
1727 			return err;
1728 	}
1729 
1730 	val = mlxsw_reg_mfde_long_process_get(mfde_pl);
1731 	err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
1732 	if (err)
1733 		return err;
1734 
1735 	val = mlxsw_reg_mfde_command_type_get(mfde_pl);
1736 	switch (val) {
1737 	case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
1738 		val_str = "mad";
1739 		break;
1740 	case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
1741 		val_str = "emad";
1742 		break;
1743 	case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
1744 		val_str = "cmdif";
1745 		break;
1746 	default:
1747 		val_str = NULL;
1748 	}
1749 	if (val_str) {
1750 		err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
1751 		if (err)
1752 			return err;
1753 	}
1754 
1755 	val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
1756 	err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
1757 	if (err)
1758 		return err;
1759 
1760 	if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
1761 		val = mlxsw_reg_mfde_log_address_get(mfde_pl);
1762 		err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
1763 		if (err)
1764 			return err;
1765 		val = mlxsw_reg_mfde_log_id_get(mfde_pl);
1766 		err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
1767 		if (err)
1768 			return err;
1769 	} else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
1770 		val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
1771 		err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
1772 		if (err)
1773 			return err;
1774 	}
1775 
1776 	return 0;
1777 }
1778 
1779 static int
1780 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
1781 				struct netlink_ext_ack *extack)
1782 {
1783 	struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
1784 	char mfgd_pl[MLXSW_REG_MFGD_LEN];
1785 	int err;
1786 
1787 	/* Read the register first to make sure no other bits are changed. */
1788 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1789 	if (err)
1790 		return err;
1791 	mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
1792 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1793 }
1794 
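/* The "fw_fatal" reporter only implements dump (decoding the saved MFDE
 * payload) and test (triggering a test event via MFGD); it provides no
 * automatic recovery callback.
 */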
1795 static const struct devlink_health_reporter_ops
1796 mlxsw_core_health_fw_fatal_ops = {
1797 	.name = "fw_fatal",
1798 	.dump = mlxsw_core_health_fw_fatal_dump,
1799 	.test = mlxsw_core_health_fw_fatal_test,
1800 };
1801 
1802 static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
1803 					     bool enable)
1804 {
1805 	char mfgd_pl[MLXSW_REG_MFGD_LEN];
1806 	int err;
1807 
1808 	/* Read the register first to make sure no other bits are changed. */
1809 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1810 	if (err)
1811 		return err;
1812 	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
1813 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1814 }
1815 
1816 static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
1817 {
1818 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
1819 	struct devlink_health_reporter *fw_fatal;
1820 	int err;
1821 
1822 	if (!mlxsw_core->driver->fw_fatal_enabled)
1823 		return 0;
1824 
1825 	fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
1826 						  0, mlxsw_core);
1827 	if (IS_ERR(fw_fatal)) {
1828 		dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter\n");
1829 		return PTR_ERR(fw_fatal);
1830 	}
1831 	mlxsw_core->health.fw_fatal = fw_fatal;
1832 
1833 	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
1834 	if (err)
1835 		goto err_trap_register;
1836 
1837 	err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
1838 	if (err)
1839 		goto err_fw_fatal_config;
1840 
1841 	return 0;
1842 
1843 err_fw_fatal_config:
1844 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
1845 err_trap_register:
1846 	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
1847 	return err;
1848 }
1849 
1850 static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
1851 {
1852 	if (!mlxsw_core->driver->fw_fatal_enabled)
1853 		return;
1854 
1855 	mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
1856 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
1857 	/* Make sure there is no more event work scheduled */
1858 	mlxsw_core_flush_owq();
1859 	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
1860 }
1861 
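/* Core/bus bring-up. The sequence below (bus init, resources, ports, LAG
 * mapping, EMAD, devlink registration and params, FW revision validation,
 * health reporter, driver init, hwmon, thermal, env) is unwound in reverse
 * order by the error labels and by mlxsw_core_bus_device_unregister().
 */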
1862 static int
1863 __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1864 				 const struct mlxsw_bus *mlxsw_bus,
1865 				 void *bus_priv, bool reload,
1866 				 struct devlink *devlink,
1867 				 struct netlink_ext_ack *extack)
1868 {
1869 	const char *device_kind = mlxsw_bus_info->device_kind;
1870 	struct mlxsw_core *mlxsw_core;
1871 	struct mlxsw_driver *mlxsw_driver;
1872 	struct mlxsw_res *res;
1873 	size_t alloc_size;
1874 	int err;
1875 
1876 	mlxsw_driver = mlxsw_core_driver_get(device_kind);
1877 	if (!mlxsw_driver)
1878 		return -EINVAL;
1879 
1880 	if (!reload) {
1881 		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
1882 		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
1883 		if (!devlink) {
1884 			err = -ENOMEM;
1885 			goto err_devlink_alloc;
1886 		}
1887 	}
1888 
1889 	mlxsw_core = devlink_priv(devlink);
1890 	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
1891 	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
1892 	mlxsw_core->driver = mlxsw_driver;
1893 	mlxsw_core->bus = mlxsw_bus;
1894 	mlxsw_core->bus_priv = bus_priv;
1895 	mlxsw_core->bus_info = mlxsw_bus_info;
1896 
1897 	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
1898 	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
1899 	if (err)
1900 		goto err_bus_init;
1901 
1902 	if (mlxsw_driver->resources_register && !reload) {
1903 		err = mlxsw_driver->resources_register(mlxsw_core);
1904 		if (err)
1905 			goto err_register_resources;
1906 	}
1907 
1908 	err = mlxsw_ports_init(mlxsw_core);
1909 	if (err)
1910 		goto err_ports_init;
1911 
1912 	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
1913 	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
1914 		alloc_size = sizeof(u8) *
1915 			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
1916 			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
1917 		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
1918 		if (!mlxsw_core->lag.mapping) {
1919 			err = -ENOMEM;
1920 			goto err_alloc_lag_mapping;
1921 		}
1922 	}
1923 
1924 	err = mlxsw_emad_init(mlxsw_core);
1925 	if (err)
1926 		goto err_emad_init;
1927 
1928 	if (!reload) {
1929 		err = devlink_register(devlink, mlxsw_bus_info->dev);
1930 		if (err)
1931 			goto err_devlink_register;
1932 	}
1933 
1934 	if (!reload) {
1935 		err = mlxsw_core_params_register(mlxsw_core);
1936 		if (err)
1937 			goto err_register_params;
1938 	}
1939 
1940 	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
1941 					 mlxsw_driver->fw_filename);
1942 	if (err)
1943 		goto err_fw_rev_validate;
1944 
1945 	err = mlxsw_core_health_init(mlxsw_core);
1946 	if (err)
1947 		goto err_health_init;
1948 
1949 	if (mlxsw_driver->init) {
1950 		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
1951 		if (err)
1952 			goto err_driver_init;
1953 	}
1954 
1955 	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
1956 	if (err)
1957 		goto err_hwmon_init;
1958 
1959 	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
1960 				 &mlxsw_core->thermal);
1961 	if (err)
1962 		goto err_thermal_init;
1963 
1964 	err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
1965 	if (err)
1966 		goto err_env_init;
1967 
1968 	mlxsw_core->is_initialized = true;
1969 	devlink_params_publish(devlink);
1970 
1971 	if (!reload)
1972 		devlink_reload_enable(devlink);
1973 
1974 	return 0;
1975 
1976 err_env_init:
1977 	mlxsw_thermal_fini(mlxsw_core->thermal);
1978 err_thermal_init:
1979 	mlxsw_hwmon_fini(mlxsw_core->hwmon);
1980 err_hwmon_init:
1981 	if (mlxsw_core->driver->fini)
1982 		mlxsw_core->driver->fini(mlxsw_core);
1983 err_driver_init:
1984 	mlxsw_core_health_fini(mlxsw_core);
1985 err_health_init:
1986 err_fw_rev_validate:
1987 	if (!reload)
1988 		mlxsw_core_params_unregister(mlxsw_core);
1989 err_register_params:
1990 	if (!reload)
1991 		devlink_unregister(devlink);
1992 err_devlink_register:
1993 	mlxsw_emad_fini(mlxsw_core);
1994 err_emad_init:
1995 	kfree(mlxsw_core->lag.mapping);
1996 err_alloc_lag_mapping:
1997 	mlxsw_ports_fini(mlxsw_core);
1998 err_ports_init:
1999 	if (!reload)
2000 		devlink_resources_unregister(devlink, NULL);
2001 err_register_resources:
2002 	mlxsw_bus->fini(bus_priv);
2003 err_bus_init:
2004 	if (!reload)
2005 		devlink_free(devlink);
2006 err_devlink_alloc:
2007 	return err;
2008 }
2009 
2010 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2011 				   const struct mlxsw_bus *mlxsw_bus,
2012 				   void *bus_priv, bool reload,
2013 				   struct devlink *devlink,
2014 				   struct netlink_ext_ack *extack)
2015 {
2016 	bool called_again = false;
2017 	int err;
2018 
2019 again:
2020 	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
2021 					       bus_priv, reload,
2022 					       devlink, extack);
2023 	/* -EAGAIN is returned in case the FW was updated. FW needs
2024 	 * a reset, so let's try to call __mlxsw_core_bus_device_register()
2025 	 * again.
2026 	 */
2027 	if (err == -EAGAIN && !called_again) {
2028 		called_again = true;
2029 		goto again;
2030 	}
2031 
2032 	return err;
2033 }
2034 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
2035 
2036 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
2037 				      bool reload)
2038 {
2039 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
2040 
2041 	if (!reload)
2042 		devlink_reload_disable(devlink);
2043 	if (devlink_is_reload_failed(devlink)) {
2044 		if (!reload)
2045 			/* Only the parts that were not de-initialized in the
2046 			 * failed reload attempt need to be de-initialized.
2047 			 */
2048 			goto reload_fail_deinit;
2049 		else
2050 			return;
2051 	}
2052 
2053 	devlink_params_unpublish(devlink);
2054 	mlxsw_core->is_initialized = false;
2055 	mlxsw_env_fini(mlxsw_core->env);
2056 	mlxsw_thermal_fini(mlxsw_core->thermal);
2057 	mlxsw_hwmon_fini(mlxsw_core->hwmon);
2058 	if (mlxsw_core->driver->fini)
2059 		mlxsw_core->driver->fini(mlxsw_core);
2060 	mlxsw_core_health_fini(mlxsw_core);
2061 	if (!reload)
2062 		mlxsw_core_params_unregister(mlxsw_core);
2063 	if (!reload)
2064 		devlink_unregister(devlink);
2065 	mlxsw_emad_fini(mlxsw_core);
2066 	kfree(mlxsw_core->lag.mapping);
2067 	mlxsw_ports_fini(mlxsw_core);
2068 	if (!reload)
2069 		devlink_resources_unregister(devlink, NULL);
2070 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
2071 	if (!reload)
2072 		devlink_free(devlink);
2073 
2074 	return;
2075 
2076 reload_fail_deinit:
2077 	mlxsw_core_params_unregister(mlxsw_core);
2078 	devlink_unregister(devlink);
2079 	devlink_resources_unregister(devlink, NULL);
2080 	devlink_free(devlink);
2081 }
2082 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
2083 
2084 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
2085 				  const struct mlxsw_tx_info *tx_info)
2086 {
2087 	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
2088 						  tx_info);
2089 }
2090 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
2091 
2092 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2093 			    const struct mlxsw_tx_info *tx_info)
2094 {
2095 	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
2096 					     tx_info);
2097 }
2098 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
2099 
2100 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
2101 				struct sk_buff *skb, u8 local_port)
2102 {
2103 	if (mlxsw_core->driver->ptp_transmitted)
2104 		mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
2105 						    local_port);
2106 }
2107 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
2108 
2109 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
2110 				   const struct mlxsw_rx_listener *rxl_b)
2111 {
2112 	return (rxl_a->func == rxl_b->func &&
2113 		rxl_a->local_port == rxl_b->local_port &&
2114 		rxl_a->trap_id == rxl_b->trap_id &&
2115 		rxl_a->mirror_reason == rxl_b->mirror_reason);
2116 }
2117 
2118 static struct mlxsw_rx_listener_item *
2119 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
2120 			const struct mlxsw_rx_listener *rxl)
2121 {
2122 	struct mlxsw_rx_listener_item *rxl_item;
2123 
2124 	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
2125 		if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
2126 			return rxl_item;
2127 	}
2128 	return NULL;
2129 }
2130 
2131 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
2132 				    const struct mlxsw_rx_listener *rxl,
2133 				    void *priv, bool enabled)
2134 {
2135 	struct mlxsw_rx_listener_item *rxl_item;
2136 
2137 	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2138 	if (rxl_item)
2139 		return -EEXIST;
2140 	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
2141 	if (!rxl_item)
2142 		return -ENOMEM;
2143 	rxl_item->rxl = *rxl;
2144 	rxl_item->priv = priv;
2145 	rxl_item->enabled = enabled;
2146 
2147 	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
2148 	return 0;
2149 }
2150 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
2151 
2152 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
2153 				       const struct mlxsw_rx_listener *rxl)
2154 {
2155 	struct mlxsw_rx_listener_item *rxl_item;
2156 
2157 	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2158 	if (!rxl_item)
2159 		return;
2160 	list_del_rcu(&rxl_item->list);
2161 	synchronize_rcu();
2162 	kfree(rxl_item);
2163 }
2164 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
2165 
2166 static void
2167 mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
2168 				 const struct mlxsw_rx_listener *rxl,
2169 				 bool enabled)
2170 {
2171 	struct mlxsw_rx_listener_item *rxl_item;
2172 
2173 	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2174 	if (WARN_ON(!rxl_item))
2175 		return;
2176 	rxl_item->enabled = enabled;
2177 }
2178 
2179 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
2180 					   void *priv)
2181 {
2182 	struct mlxsw_event_listener_item *event_listener_item = priv;
2183 	struct mlxsw_reg_info reg;
2184 	char *payload;
2185 	char *reg_tlv;
2186 	char *op_tlv;
2187 
2188 	mlxsw_emad_tlv_parse(skb);
2189 	op_tlv = mlxsw_emad_op_tlv(skb);
2190 	reg_tlv = mlxsw_emad_reg_tlv(skb);
2191 
2192 	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
2193 	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
2194 	payload = mlxsw_emad_reg_payload(reg_tlv);
2195 	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
2196 	dev_kfree_skb(skb);
2197 }
2198 
2199 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
2200 				      const struct mlxsw_event_listener *el_b)
2201 {
2202 	return (el_a->func == el_b->func &&
2203 		el_a->trap_id == el_b->trap_id);
2204 }
2205 
2206 static struct mlxsw_event_listener_item *
2207 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
2208 			   const struct mlxsw_event_listener *el)
2209 {
2210 	struct mlxsw_event_listener_item *el_item;
2211 
2212 	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
2213 		if (__is_event_listener_equal(&el_item->el, el))
2214 			return el_item;
2215 	}
2216 	return NULL;
2217 }
2218 
2219 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
2220 				       const struct mlxsw_event_listener *el,
2221 				       void *priv)
2222 {
2223 	int err;
2224 	struct mlxsw_event_listener_item *el_item;
2225 	const struct mlxsw_rx_listener rxl = {
2226 		.func = mlxsw_core_event_listener_func,
2227 		.local_port = MLXSW_PORT_DONT_CARE,
2228 		.trap_id = el->trap_id,
2229 	};
2230 
2231 	el_item = __find_event_listener_item(mlxsw_core, el);
2232 	if (el_item)
2233 		return -EEXIST;
2234 	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
2235 	if (!el_item)
2236 		return -ENOMEM;
2237 	el_item->el = *el;
2238 	el_item->priv = priv;
2239 
2240 	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
2241 	if (err)
2242 		goto err_rx_listener_register;
2243 
2244 	/* No reason to save item if we did not manage to register an RX
2245 	 * listener for it.
2246 	 */
2247 	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
2248 
2249 	return 0;
2250 
2251 err_rx_listener_register:
2252 	kfree(el_item);
2253 	return err;
2254 }
2255 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
2256 
2257 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
2258 					  const struct mlxsw_event_listener *el)
2259 {
2260 	struct mlxsw_event_listener_item *el_item;
2261 	const struct mlxsw_rx_listener rxl = {
2262 		.func = mlxsw_core_event_listener_func,
2263 		.local_port = MLXSW_PORT_DONT_CARE,
2264 		.trap_id = el->trap_id,
2265 	};
2266 
2267 	el_item = __find_event_listener_item(mlxsw_core, el);
2268 	if (!el_item)
2269 		return;
2270 	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
2271 	list_del(&el_item->list);
2272 	kfree(el_item);
2273 }
2274 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
2275 
2276 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
2277 					const struct mlxsw_listener *listener,
2278 					void *priv, bool enabled)
2279 {
2280 	if (listener->is_event) {
2281 		WARN_ON(!enabled);
2282 		return mlxsw_core_event_listener_register(mlxsw_core,
2283 						&listener->event_listener,
2284 						priv);
2285 	} else {
2286 		return mlxsw_core_rx_listener_register(mlxsw_core,
2287 						&listener->rx_listener,
2288 						priv, enabled);
2289 	}
2290 }
2291 
2292 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
2293 				      const struct mlxsw_listener *listener,
2294 				      void *priv)
2295 {
2296 	if (listener->is_event)
2297 		mlxsw_core_event_listener_unregister(mlxsw_core,
2298 						     &listener->event_listener);
2299 	else
2300 		mlxsw_core_rx_listener_unregister(mlxsw_core,
2301 						  &listener->rx_listener);
2302 }
2303 
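/* Trap (un)registration pairs the listener book-keeping above with an HPKT
 * register write that programs the trap action and trap group;
 * enabled_on_register selects between the enable and disable variants at
 * registration time.
 */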
2304 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
2305 			     const struct mlxsw_listener *listener, void *priv)
2306 {
2307 	enum mlxsw_reg_htgt_trap_group trap_group;
2308 	enum mlxsw_reg_hpkt_action action;
2309 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2310 	int err;
2311 
2312 	err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
2313 					   listener->enabled_on_register);
2314 	if (err)
2315 		return err;
2316 
2317 	action = listener->enabled_on_register ? listener->en_action :
2318 						 listener->dis_action;
2319 	trap_group = listener->enabled_on_register ? listener->en_trap_group :
2320 						     listener->dis_trap_group;
2321 	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2322 			    trap_group, listener->is_ctrl);
2323 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2324 	if (err)
2325 		goto err_trap_set;
2326 
2327 	return 0;
2328 
2329 err_trap_set:
2330 	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2331 	return err;
2332 }
2333 EXPORT_SYMBOL(mlxsw_core_trap_register);
2334 
2335 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
2336 				const struct mlxsw_listener *listener,
2337 				void *priv)
2338 {
2339 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2340 
2341 	if (!listener->is_event) {
2342 		mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
2343 				    listener->trap_id, listener->dis_trap_group,
2344 				    listener->is_ctrl);
2345 		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2346 	}
2347 
2348 	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2349 }
2350 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
2351 
2352 int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
2353 			      const struct mlxsw_listener *listener,
2354 			      bool enabled)
2355 {
2356 	enum mlxsw_reg_htgt_trap_group trap_group;
2357 	enum mlxsw_reg_hpkt_action action;
2358 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2359 	int err;
2360 
2361 	/* Not supported for event listener */
2362 	if (WARN_ON(listener->is_event))
2363 		return -EINVAL;
2364 
2365 	action = enabled ? listener->en_action : listener->dis_action;
2366 	trap_group = enabled ? listener->en_trap_group :
2367 			       listener->dis_trap_group;
2368 	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2369 			    trap_group, listener->is_ctrl);
2370 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2371 	if (err)
2372 		return err;
2373 
2374 	mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
2375 					 enabled);
2376 	return 0;
2377 }
2378 EXPORT_SYMBOL(mlxsw_core_trap_state_set);
2379 
2380 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
2381 {
2382 	return atomic64_inc_return(&mlxsw_core->emad.tid);
2383 }
2384 
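/* EMAD-based register access is asynchronous: a transaction is allocated
 * here, linked on the caller's bulk_list by the EMAD layer and reaped by
 * mlxsw_reg_trans_bulk_wait(); it is freed with kfree_rcu() so a concurrent
 * RCU lookup of the transaction remains safe. Typical caller pattern (the
 * same one used by mlxsw_core_reg_access() below):
 *
 *	LIST_HEAD(bulk_list);
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, reg, payload, &bulk_list,
 *				    cb, cb_priv);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */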
2385 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
2386 				      const struct mlxsw_reg_info *reg,
2387 				      char *payload,
2388 				      enum mlxsw_core_reg_access_type type,
2389 				      struct list_head *bulk_list,
2390 				      mlxsw_reg_trans_cb_t *cb,
2391 				      unsigned long cb_priv)
2392 {
2393 	u64 tid = mlxsw_core_tid_get(mlxsw_core);
2394 	struct mlxsw_reg_trans *trans;
2395 	int err;
2396 
2397 	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
2398 	if (!trans)
2399 		return -ENOMEM;
2400 
2401 	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
2402 				    bulk_list, cb, cb_priv, tid);
2403 	if (err) {
2404 		kfree_rcu(trans, rcu);
2405 		return err;
2406 	}
2407 	return 0;
2408 }
2409 
2410 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
2411 			  const struct mlxsw_reg_info *reg, char *payload,
2412 			  struct list_head *bulk_list,
2413 			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2414 {
2415 	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2416 					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
2417 					  bulk_list, cb, cb_priv);
2418 }
2419 EXPORT_SYMBOL(mlxsw_reg_trans_query);
2420 
2421 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
2422 			  const struct mlxsw_reg_info *reg, char *payload,
2423 			  struct list_head *bulk_list,
2424 			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2425 {
2426 	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2427 					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
2428 					  bulk_list, cb, cb_priv);
2429 }
2430 EXPORT_SYMBOL(mlxsw_reg_trans_write);
2431 
2432 #define MLXSW_REG_TRANS_ERR_STRING_SIZE	256
2433 
2434 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
2435 {
2436 	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
2437 	struct mlxsw_core *mlxsw_core = trans->core;
2438 	int err;
2439 
2440 	wait_for_completion(&trans->completion);
2441 	cancel_delayed_work_sync(&trans->timeout_dw);
2442 	err = trans->err;
2443 
2444 	if (trans->retries)
2445 		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
2446 			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
2447 	if (err) {
2448 		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
2449 			trans->tid, trans->reg->id,
2450 			mlxsw_reg_id_str(trans->reg->id),
2451 			mlxsw_core_reg_access_type_str(trans->type),
2452 			trans->emad_status,
2453 			mlxsw_emad_op_tlv_status_str(trans->emad_status));
2454 
2455 		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
2456 			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
2457 			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
2458 			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
2459 			 trans->emad_err_string ? trans->emad_err_string : "");
2460 
2461 		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
2462 				    trans->emad_status, err_string);
2463 
2464 		kfree(trans->emad_err_string);
2465 	}
2466 
2467 	list_del(&trans->bulk_list);
2468 	kfree_rcu(trans, rcu);
2469 	return err;
2470 }
2471 
2472 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
2473 {
2474 	struct mlxsw_reg_trans *trans;
2475 	struct mlxsw_reg_trans *tmp;
2476 	int sum_err = 0;
2477 	int err;
2478 
2479 	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
2480 		err = mlxsw_reg_trans_wait(trans);
2481 		if (err && sum_err == 0)
2482 			sum_err = err; /* first error to be returned */
2483 	}
2484 	return sum_err;
2485 }
2486 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
2487 
2488 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
2489 				     const struct mlxsw_reg_info *reg,
2490 				     char *payload,
2491 				     enum mlxsw_core_reg_access_type type)
2492 {
2493 	enum mlxsw_emad_op_tlv_status status;
2494 	int err, n_retry;
2495 	bool reset_ok;
2496 	char *in_mbox, *out_mbox, *tmp;
2497 
2498 	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
2499 		reg->id, mlxsw_reg_id_str(reg->id),
2500 		mlxsw_core_reg_access_type_str(type));
2501 
2502 	in_mbox = mlxsw_cmd_mbox_alloc();
2503 	if (!in_mbox)
2504 		return -ENOMEM;
2505 
2506 	out_mbox = mlxsw_cmd_mbox_alloc();
2507 	if (!out_mbox) {
2508 		err = -ENOMEM;
2509 		goto free_in_mbox;
2510 	}
2511 
2512 	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
2513 			       mlxsw_core_tid_get(mlxsw_core));
2514 	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
2515 	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
2516 
2517 	/* Special treatment is needed for the MRSR (reset) register.
2518 	 * The command interface will return error after the command
2519 	 * is executed, so tell the lower layer to expect it
2520 	 * and cope accordingly.
2521 	 */
2522 	reset_ok = reg->id == MLXSW_REG_MRSR_ID;
2523 
2524 	n_retry = 0;
2525 retry:
2526 	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
2527 	if (!err) {
2528 		err = mlxsw_emad_process_status(out_mbox, &status);
2529 		if (err) {
2530 			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
2531 				goto retry;
2532 			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
2533 				status, mlxsw_emad_op_tlv_status_str(status));
2534 		}
2535 	}
2536 
2537 	if (!err)
2538 		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
2539 		       reg->len);
2540 
2541 	mlxsw_cmd_mbox_free(out_mbox);
2542 free_in_mbox:
2543 	mlxsw_cmd_mbox_free(in_mbox);
2544 	if (err)
2545 		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
2546 			reg->id, mlxsw_reg_id_str(reg->id),
2547 			mlxsw_core_reg_access_type_str(type));
2548 	return err;
2549 }
2550 
2551 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
2552 				     char *payload, size_t payload_len,
2553 				     unsigned long cb_priv)
2554 {
2555 	char *orig_payload = (char *) cb_priv;
2556 
2557 	memcpy(orig_payload, payload, payload_len);
2558 }
2559 
2560 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
2561 				 const struct mlxsw_reg_info *reg,
2562 				 char *payload,
2563 				 enum mlxsw_core_reg_access_type type)
2564 {
2565 	LIST_HEAD(bulk_list);
2566 	int err;
2567 
2568 	/* During initialization the EMAD interface is not available to us,
2569 	 * so we default to the command interface. We switch to the EMAD interface
2570 	 * after setting the appropriate traps.
2571 	 */
2572 	if (!mlxsw_core->emad.use_emad)
2573 		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
2574 						 payload, type);
2575 
2576 	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
2577 					 payload, type, &bulk_list,
2578 					 mlxsw_core_reg_access_cb,
2579 					 (unsigned long) payload);
2580 	if (err)
2581 		return err;
2582 	return mlxsw_reg_trans_bulk_wait(&bulk_list);
2583 }
2584 
2585 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
2586 		    const struct mlxsw_reg_info *reg, char *payload)
2587 {
2588 	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
2589 				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
2590 }
2591 EXPORT_SYMBOL(mlxsw_reg_query);
2592 
2593 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
2594 		    const struct mlxsw_reg_info *reg, char *payload)
2595 {
2596 	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
2597 				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
2598 }
2599 EXPORT_SYMBOL(mlxsw_reg_write);
2600 
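/* RX dispatch: resolve the local port (via the LAG mapping when needed),
 * then walk the RCU-protected rx_listener_list for a listener matching
 * local_port (or MLXSW_PORT_DONT_CARE), trap_id and mirror_reason. The skb
 * is dropped if no enabled listener is found.
 */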
2601 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2602 			    struct mlxsw_rx_info *rx_info)
2603 {
2604 	struct mlxsw_rx_listener_item *rxl_item;
2605 	const struct mlxsw_rx_listener *rxl;
2606 	u8 local_port;
2607 	bool found = false;
2608 
2609 	if (rx_info->is_lag) {
2610 		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
2611 				    __func__, rx_info->u.lag_id,
2612 				    rx_info->lag_port_index);
2613 		/* Upper layer does not care if the skb came from LAG or not,
2614 		 * so just get the local_port for the lag port and push it up.
2615 		 */
2616 		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
2617 							rx_info->u.lag_id,
2618 							rx_info->lag_port_index);
2619 	} else {
2620 		local_port = rx_info->u.sys_port;
2621 	}
2622 
2623 	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
2624 			    __func__, local_port, rx_info->trap_id);
2625 
2626 	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
2627 	    (local_port >= mlxsw_core->max_ports))
2628 		goto drop;
2629 
2630 	rcu_read_lock();
2631 	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
2632 		rxl = &rxl_item->rxl;
2633 		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
2634 		     rxl->local_port == local_port) &&
2635 		    rxl->trap_id == rx_info->trap_id &&
2636 		    rxl->mirror_reason == rx_info->mirror_reason) {
2637 			if (rxl_item->enabled)
2638 				found = true;
2639 			break;
2640 		}
2641 	}
2642 	if (!found) {
2643 		rcu_read_unlock();
2644 		goto drop;
2645 	}
2646 
2647 	rxl->func(skb, local_port, rxl_item->priv);
2648 	rcu_read_unlock();
2649 	return;
2650 
2651 drop:
2652 	dev_kfree_skb(skb);
2653 }
2654 EXPORT_SYMBOL(mlxsw_core_skb_receive);
2655 
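/* The LAG mapping allocated in __mlxsw_core_bus_device_register() is a flat
 * MAX_LAG * MAX_LAG_MEMBERS byte array; each (lag_id, port_index) pair maps
 * to the local port of that LAG member.
 */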
2656 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
2657 					u16 lag_id, u8 port_index)
2658 {
2659 	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
2660 	       port_index;
2661 }
2662 
2663 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
2664 				u16 lag_id, u8 port_index, u8 local_port)
2665 {
2666 	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2667 						 lag_id, port_index);
2668 
2669 	mlxsw_core->lag.mapping[index] = local_port;
2670 }
2671 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
2672 
2673 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
2674 			      u16 lag_id, u8 port_index)
2675 {
2676 	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2677 						 lag_id, port_index);
2678 
2679 	return mlxsw_core->lag.mapping[index];
2680 }
2681 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
2682 
2683 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
2684 				  u16 lag_id, u8 local_port)
2685 {
2686 	int i;
2687 
2688 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
2689 		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2690 							 lag_id, i);
2691 
2692 		if (mlxsw_core->lag.mapping[index] == local_port)
2693 			mlxsw_core->lag.mapping[index] = 0;
2694 	}
2695 }
2696 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
2697 
2698 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
2699 			  enum mlxsw_res_id res_id)
2700 {
2701 	return mlxsw_res_valid(&mlxsw_core->res, res_id);
2702 }
2703 EXPORT_SYMBOL(mlxsw_core_res_valid);
2704 
2705 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
2706 		       enum mlxsw_res_id res_id)
2707 {
2708 	return mlxsw_res_get(&mlxsw_core->res, res_id);
2709 }
2710 EXPORT_SYMBOL(mlxsw_core_res_get);
2711 
2712 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
2713 				  enum devlink_port_flavour flavour,
2714 				  u32 port_number, bool split,
2715 				  u32 split_port_subnumber,
2716 				  bool splittable, u32 lanes,
2717 				  const unsigned char *switch_id,
2718 				  unsigned char switch_id_len)
2719 {
2720 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
2721 	struct mlxsw_core_port *mlxsw_core_port =
2722 					&mlxsw_core->ports[local_port];
2723 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2724 	struct devlink_port_attrs attrs = {};
2725 	int err;
2726 
2727 	attrs.split = split;
2728 	attrs.lanes = lanes;
2729 	attrs.splittable = splittable;
2730 	attrs.flavour = flavour;
2731 	attrs.phys.port_number = port_number;
2732 	attrs.phys.split_subport_number = split_port_subnumber;
2733 	memcpy(attrs.switch_id.id, switch_id, switch_id_len);
2734 	attrs.switch_id.id_len = switch_id_len;
2735 	mlxsw_core_port->local_port = local_port;
2736 	devlink_port_attrs_set(devlink_port, &attrs);
2737 	err = devlink_port_register(devlink, devlink_port, local_port);
2738 	if (err)
2739 		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
2740 	return err;
2741 }
2742 
2743 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
2744 {
2745 	struct mlxsw_core_port *mlxsw_core_port =
2746 					&mlxsw_core->ports[local_port];
2747 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2748 
2749 	devlink_port_unregister(devlink_port);
2750 	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
2751 }
2752 
2753 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
2754 			 u32 port_number, bool split,
2755 			 u32 split_port_subnumber,
2756 			 bool splittable, u32 lanes,
2757 			 const unsigned char *switch_id,
2758 			 unsigned char switch_id_len)
2759 {
2760 	return __mlxsw_core_port_init(mlxsw_core, local_port,
2761 				      DEVLINK_PORT_FLAVOUR_PHYSICAL,
2762 				      port_number, split, split_port_subnumber,
2763 				      splittable, lanes,
2764 				      switch_id, switch_id_len);
2765 }
2766 EXPORT_SYMBOL(mlxsw_core_port_init);
2767 
2768 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
2769 {
2770 	__mlxsw_core_port_fini(mlxsw_core, local_port);
2771 }
2772 EXPORT_SYMBOL(mlxsw_core_port_fini);
2773 
2774 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
2775 			     void *port_driver_priv,
2776 			     const unsigned char *switch_id,
2777 			     unsigned char switch_id_len)
2778 {
2779 	struct mlxsw_core_port *mlxsw_core_port =
2780 				&mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
2781 	int err;
2782 
2783 	err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
2784 				     DEVLINK_PORT_FLAVOUR_CPU,
2785 				     0, false, 0, false, 0,
2786 				     switch_id, switch_id_len);
2787 	if (err)
2788 		return err;
2789 
2790 	mlxsw_core_port->port_driver_priv = port_driver_priv;
2791 	return 0;
2792 }
2793 EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
2794 
2795 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
2796 {
2797 	__mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
2798 }
2799 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
2800 
2801 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
2802 			     void *port_driver_priv, struct net_device *dev)
2803 {
2804 	struct mlxsw_core_port *mlxsw_core_port =
2805 					&mlxsw_core->ports[local_port];
2806 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2807 
2808 	mlxsw_core_port->port_driver_priv = port_driver_priv;
2809 	devlink_port_type_eth_set(devlink_port, dev);
2810 }
2811 EXPORT_SYMBOL(mlxsw_core_port_eth_set);
2812 
2813 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
2814 			    void *port_driver_priv)
2815 {
2816 	struct mlxsw_core_port *mlxsw_core_port =
2817 					&mlxsw_core->ports[local_port];
2818 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2819 
2820 	mlxsw_core_port->port_driver_priv = port_driver_priv;
2821 	devlink_port_type_ib_set(devlink_port, NULL);
2822 }
2823 EXPORT_SYMBOL(mlxsw_core_port_ib_set);
2824 
2825 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
2826 			   void *port_driver_priv)
2827 {
2828 	struct mlxsw_core_port *mlxsw_core_port =
2829 					&mlxsw_core->ports[local_port];
2830 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2831 
2832 	mlxsw_core_port->port_driver_priv = port_driver_priv;
2833 	devlink_port_type_clear(devlink_port);
2834 }
2835 EXPORT_SYMBOL(mlxsw_core_port_clear);
2836 
2837 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
2838 						u8 local_port)
2839 {
2840 	struct mlxsw_core_port *mlxsw_core_port =
2841 					&mlxsw_core->ports[local_port];
2842 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2843 
2844 	return devlink_port->type;
2845 }
2846 EXPORT_SYMBOL(mlxsw_core_port_type_get);
2847 
2848 
2849 struct devlink_port *
2850 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
2851 				 u8 local_port)
2852 {
2853 	struct mlxsw_core_port *mlxsw_core_port =
2854 					&mlxsw_core->ports[local_port];
2855 	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2856 
2857 	return devlink_port;
2858 }
2859 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
2860 
2861 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
2862 {
2863 	return mlxsw_core->env;
2864 }
2865 
2866 bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
2867 {
2868 	return mlxsw_core->is_initialized;
2869 }
2870 
2871 int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
2872 {
2873 	enum mlxsw_reg_pmtm_module_type module_type;
2874 	char pmtm_pl[MLXSW_REG_PMTM_LEN];
2875 	int err;
2876 
2877 	mlxsw_reg_pmtm_pack(pmtm_pl, module);
2878 	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
2879 	if (err)
2880 		return err;
2881 	mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
2882 
2883 	/* Here we need to get the module width according to the module type. */
2884 
2885 	switch (module_type) {
2886 	case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
2887 	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
2888 	case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
2889 		return 8;
2890 	case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
2891 	case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
2892 	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
2893 		return 4;
2894 	case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
2895 	case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
2896 	case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
2897 	case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
2898 		return 2;
2899 	case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
2900 	case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
2901 	case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
2902 		return 1;
2903 	default:
2904 		return -EINVAL;
2905 	}
2906 }
2907 EXPORT_SYMBOL(mlxsw_core_module_max_width);
2908 
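/* Dump a command mailbox as 32-bit big-endian words for debugging, trimming
 * trailing zero words (at least one word is always printed).
 */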
2909 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
2910 				    const char *buf, size_t size)
2911 {
2912 	__be32 *m = (__be32 *) buf;
2913 	int i;
2914 	int count = size / sizeof(__be32);
2915 
2916 	for (i = count - 1; i >= 0; i--)
2917 		if (m[i])
2918 			break;
2919 	i++;
2920 	count = i ? i : 1;
2921 	for (i = 0; i < count; i += 4)
2922 		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
2923 			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
2924 			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
2925 }
2926 
2927 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
2928 		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
2929 		   char *in_mbox, size_t in_mbox_size,
2930 		   char *out_mbox, size_t out_mbox_size)
2931 {
2932 	u8 status;
2933 	int err;
2934 
2935 	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
2936 	if (!mlxsw_core->bus->cmd_exec)
2937 		return -EOPNOTSUPP;
2938 
2939 	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2940 		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
2941 	if (in_mbox) {
2942 		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
2943 		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
2944 	}
2945 
2946 	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
2947 					opcode_mod, in_mod, out_mbox_direct,
2948 					in_mbox, in_mbox_size,
2949 					out_mbox, out_mbox_size, &status);
2950 
2951 	if (!err && out_mbox) {
2952 		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
2953 		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
2954 	}
2955 
2956 	if (reset_ok && err == -EIO &&
2957 	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
2958 		err = 0;
2959 	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
2960 		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
2961 			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2962 			in_mod, status, mlxsw_cmd_status_str(status));
2963 	} else if (err == -ETIMEDOUT) {
2964 		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2965 			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2966 			in_mod);
2967 	}
2968 
2969 	return err;
2970 }
2971 EXPORT_SYMBOL(mlxsw_cmd_exec);
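/* Minimal usage sketch (illustration only, not a caller from this file):
 * issue QUERY_FW with no input mailbox and read the result from an output
 * mailbox. MLXSW_CMD_MBOX_SIZE and MLXSW_CMD_OPCODE_QUERY_FW are assumed to
 * be provided by cmd.h.
 *
 *	char *out_mbox = kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
 *	int err;
 *
 *	if (!out_mbox)
 *		return -ENOMEM;
 *	err = mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW, 0, 0,
 *			     false, false, NULL, 0,
 *			     out_mbox, MLXSW_CMD_MBOX_SIZE);
 *	kfree(out_mbox);
 */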
2972 
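/* Work scheduling helpers. Delayed work is queued on the general-purpose
 * mlxsw_wq, while mlxsw_core_schedule_work() uses mlxsw_owq, the ordered
 * workqueue created in mlxsw_core_module_init(), so those work items run
 * one at a time in queueing order.
 */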
2973 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
2974 {
2975 	return queue_delayed_work(mlxsw_wq, dwork, delay);
2976 }
2977 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
2978 
2979 bool mlxsw_core_schedule_work(struct work_struct *work)
2980 {
2981 	return queue_work(mlxsw_owq, work);
2982 }
2983 EXPORT_SYMBOL(mlxsw_core_schedule_work);
2984 
2985 void mlxsw_core_flush_owq(void)
2986 {
2987 	flush_workqueue(mlxsw_owq);
2988 }
2989 EXPORT_SYMBOL(mlxsw_core_flush_owq);
2990 
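/* Ask the device driver to compute the KVD single-hash, double-hash and
 * linear partition sizes for the given config profile. Returns -EINVAL if
 * the driver does not implement the kvd_sizes_get callback.
 */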
2991 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
2992 			     const struct mlxsw_config_profile *profile,
2993 			     u64 *p_single_size, u64 *p_double_size,
2994 			     u64 *p_linear_size)
2995 {
2996 	struct mlxsw_driver *driver = mlxsw_core->driver;
2997 
2998 	if (!driver->kvd_sizes_get)
2999 		return -EINVAL;
3000 
3001 	return driver->kvd_sizes_get(mlxsw_core, profile,
3002 				     p_single_size, p_double_size,
3003 				     p_linear_size);
3004 }
3005 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
3006 
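/* Walk the firmware resource table using the QUERY_RESOURCES command. Each
 * query returns a page of (id, data) pairs which are fed to mlxsw_res_parse();
 * iteration stops once the firmware reports the table-end marker.
 */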
3007 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
3008 			       struct mlxsw_res *res)
3009 {
3010 	int index, i;
3011 	u64 data;
3012 	u16 id;
3013 	int err;
3014 
3015 	if (!res)
3016 		return 0;
3017 
3018 	mlxsw_cmd_mbox_zero(mbox);
3019 
3020 	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
3021 	     index++) {
3022 		err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
3023 		if (err)
3024 			return err;
3025 
3026 		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
3027 			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
3028 			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
3029 
3030 			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
3031 				return 0;
3032 
3033 			mlxsw_res_parse(res, id, data);
3034 		}
3035 	}
3036 
3037 	/* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES queries we still did not
3038 	 * get MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went wrong in the FW.
3039 	 */
3040 	return -EIO;
3041 }
3042 EXPORT_SYMBOL(mlxsw_core_resources_query);
3043 
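/* Read the upper/lower 32 bits of the device's free-running clock (FRC)
 * counter via the bus-specific callbacks; combining the two words into a
 * 64-bit value is left to the caller.
 */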
3044 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
3045 {
3046 	return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
3047 }
3048 EXPORT_SYMBOL(mlxsw_core_read_frc_h);
3049 
3050 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
3051 {
3052 	return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
3053 }
3054 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
3055 
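/* Drivers call this when the firmware supports the EMAD string TLV. Once
 * enabled, EMADs are built with a string TLV so the firmware can return a
 * textual error description alongside the status code.
 */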
3056 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
3057 {
3058 	mlxsw_core->emad.enable_string_tlv = true;
3059 }
3060 EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
3061 
3062 static int __init mlxsw_core_module_init(void)
3063 {
3064 	int err;
3065 
3066 	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
3067 	if (!mlxsw_wq)
3068 		return -ENOMEM;
3069 	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
3070 					    mlxsw_core_driver_name);
3071 	if (!mlxsw_owq) {
3072 		err = -ENOMEM;
3073 		goto err_alloc_ordered_workqueue;
3074 	}
3075 	return 0;
3076 
3077 err_alloc_ordered_workqueue:
3078 	destroy_workqueue(mlxsw_wq);
3079 	return err;
3080 }
3081 
3082 static void __exit mlxsw_core_module_exit(void)
3083 {
3084 	destroy_workqueue(mlxsw_owq);
3085 	destroy_workqueue(mlxsw_wq);
3086 }
3087 
3088 module_init(mlxsw_core_module_init);
3089 module_exit(mlxsw_core_module_exit);
3090 
3091 MODULE_LICENSE("Dual BSD/GPL");
3092 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3093 MODULE_DESCRIPTION("Mellanox switch device core driver");
3094