/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"

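/* The firmware health counter is sampled every MLX5_HEALTH_POLL_INTERVAL;
 * MAX_MISSES consecutive samples without the counter advancing are treated
 * as stuck firmware and trigger a health report (see poll_health()).
 */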
enum {
	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
	MAX_MISSES			= 3,
};

enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
};

enum {
	MLX5_DROP_NEW_HEALTH_WORK,
};

enum {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};

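/* The 3-bit NIC interface (IFC) state lives in the cmdq_addr_l_sz word of the
 * initialization segment, next to the command queue address/size bits, which
 * must be preserved when the state is rewritten.
 */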
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}

void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
{
	u32 cur_cmdq_addr_l_sz;

	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
		    state << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;

	/* Offline PCI reads return 0xffffffff */
	return (ioread32be(&h->fw_ver) == 0xffffffff);
}

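/* Firmware requests a reset by raising the RFR bit in the health buffer
 * together with a non-zero syndrome.
 */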
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
	u8 synd = ioread8(&h->synd);

	if (rfr && synd)
		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
	return rfr && synd;
}

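/* The fatal sensors are checked in order; the first one that trips determines
 * the reported error, so a PCI failure takes precedence over the NIC interface
 * state, which in turn takes precedence over a FW reset request.
 */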
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_not_working(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
		return MLX5_SENSOR_NIC_DISABLED;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}

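/* Take or release the SW reset semaphore through the vendor-specific
 * capability (VSC) gateway. Only a PF may own the semaphore; -EBUSY from the
 * semaphore space means another PF already started the reset.
 */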
static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock)
{
	enum mlx5_vsc_state state;
	int ret;

	if (!mlx5_core_is_pf(dev))
		return -EBUSY;

	/* Try to lock GW access. Don't return EBUSY from this stage: a locked
	 * GW does not mean that another PF has already started the reset.
	 */
	ret = mlx5_vsc_gw_lock(dev);
	if (ret == -EBUSY)
		return -EINVAL;
	if (ret)
		return ret;

	state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK;
	/* At this stage, if the return status is EBUSY, then we know for sure
	 * that another PF has started the reset, so don't allow another reset.
	 */
	ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state);
	if (ret)
		mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");

	/* Unlock GW access */
	mlx5_vsc_gw_unlock(dev);

	return ret;
}

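/* Issue a FW-level SW reset by writing MLX5_NIC_IFC_SW_RESET to the NIC
 * interface state, but only if the FW advertises reset support in the
 * initialization segment and the error is one a reset can actually fix.
 */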
static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported = (ioread32be(&dev->iseg->initializing) >>
			  MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	u32 fatal_error;

	if (!supported)
		return false;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant second reset. If the fatal error
	 * was PCI related, a reset won't help.
	 */
	fatal_error = mlx5_health_check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.");
		return false;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset; the command
	 * interface address also resides here, don't overwrite it.
	 */
	mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);

	return true;
}

static void enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_cmd_flush(dev);
	}

	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
}

void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	bool err_detected = false;

	/* Mark the device as fatal in order to abort FW commands */
	if ((mlx5_health_check_fatal_sensors(dev) || force) &&
	    dev->state == MLX5_DEVICE_STATE_UP) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		err_detected = true;
	}
	mutex_lock(&dev->intf_state_mutex);
	if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock; /* a previous error is still being handled */
	if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		goto unlock;
	}

	enter_error_state(dev, force);
unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

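/* Try to own the SW reset semaphore, issue the FW reset if nobody else has,
 * and then wait for the NIC interface state to report DISABLED. The wait is
 * short (MLX5_FW_RESET_WAIT_MS) when this function owns the reset and long
 * (MLX5_CRDUMP_WAIT_MS) when another PF owns it, since that PF may be
 * collecting a cr-dump first.
 */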
#define MLX5_CRDUMP_WAIT_MS	60000
#define MLX5_FW_RESET_WAIT_MS	1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
	int lock = -EBUSY;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;

	mlx5_core_err(dev, "start\n");

	if (mlx5_health_check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
		/* Get cr-dump and reset FW semaphore */
		lock = lock_sem_sw_reset(dev, true);

		if (lock == -EBUSY) {
			delay_ms = MLX5_CRDUMP_WAIT_MS;
			goto recover_from_sw_reset;
		}
		/* Execute SW reset */
		reset_fw_if_needed(dev);
	}

recover_from_sw_reset:
	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		msleep(20);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		lock_sem_sw_reset(dev, false);

	mlx5_core_err(dev, "end\n");

unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_interface = mlx5_get_nic_state(dev);

	switch (nic_interface) {
	case MLX5_NIC_IFC_FULL:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
		break;

	case MLX5_NIC_IFC_DISABLED:
		mlx5_core_warn(dev, "starting teardown\n");
		break;

	case MLX5_NIC_IFC_NO_DRAM_NIC:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
		break;

	case MLX5_NIC_IFC_SW_RESET:
		/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
		 * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
		 *    and this is a VF); this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function; the driver
		 *    can be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev, "NIC SW reset in progress\n");
		break;

	default:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
			       nic_interface);
	}

	mlx5_disable_device(dev);
}

/* How long (in msecs) to wait for PCI reads to start working again before the
 * health recovery flow resets the driver.
 */
#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
	while (sensor_pci_not_working(dev)) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		msleep(100);
	}
	return 0;
}

static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
{
	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	if (mlx5_health_wait_pci_up(dev)) {
		mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n");
		return -EIO;
	}
	mlx5_core_err(dev, "starting health recovery flow\n");
	mlx5_recover_device(dev);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
	    mlx5_health_check_fatal_sensors(dev)) {
		mlx5_core_err(dev, "health recovery failed\n");
		return -EIO;
	}
	return 0;
}

static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}

static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	char fw_str[18];
	u32 fw;
	int i;

	/* If the syndrome is 0, the device is OK and there is no need to
	 * print the health buffer.
	 */
	if (!ioread8(&h->synd))
		return;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
			      ioread32be(h->assert_var + i));

	mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
		      ioread32be(&h->assert_exit_ptr));
	mlx5_core_err(dev, "assert_callra 0x%08x\n",
		      ioread32be(&h->assert_callra));
	sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	mlx5_core_err(dev, "fw_ver %s\n", fw_str);
	mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
		      hsynd_str(ioread8(&h->synd)));
	mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}

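/* devlink "fw" reporter: .diagnose reports the current health syndrome and its
 * description; .dump triggers a FW core dump and attaches the reporter
 * context, the health buffer contents and any saved FW tracer traces.
 */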
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
			  struct devlink_fmsg *fmsg,
			  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 synd;
	int err;

	synd = ioread8(&h->synd);
	err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	if (err || !synd)
		return err;
	return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
}

struct mlx5_fw_reporter_ctx {
	u8 err_synd;
	int miss_counter;
};

static int
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
			       struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
	int err;

	err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
				       fw_reporter_ctx->err_synd);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
					fw_reporter_ctx->miss_counter);
	if (err)
		return err;
	return 0;
}

static int
mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
				       struct devlink_fmsg *fmsg)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	int err;
	int i;

	if (!ioread8(&h->synd))
		return 0;

	err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
		err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
					ioread32be(&h->assert_exit_ptr));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
					ioread32be(&h->assert_callra));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
				       ioread8(&h->irisc_index));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
					ioread16be(&h->ext_synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
					ioread32be(&h->fw_ver));
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;
	return devlink_fmsg_pair_nest_end(fmsg);
}

static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
		      struct devlink_fmsg *fmsg, void *priv_ctx,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	int err;

	err = mlx5_fw_tracer_trigger_core_dump_general(dev);
	if (err)
		return err;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			return err;
	}

	err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
	if (err)
		return err;
	return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}

static void mlx5_fw_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;

	health = container_of(work, struct mlx5_core_health, report_work);

	if (IS_ERR_OR_NULL(health->fw_reporter))
		return;

	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	if (fw_reporter_ctx.err_synd) {
		devlink_health_report(health->fw_reporter,
				      "FW syndrome reported", &fw_reporter_ctx);
		return;
	}
	if (fw_reporter_ctx.miss_counter)
		devlink_health_report(health->fw_reporter,
				      "FW miss counter reported",
				      &fw_reporter_ctx);
}

static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
		.name = "fw",
		.diagnose = mlx5_fw_reporter_diagnose,
		.dump = mlx5_fw_reporter_dump,
};

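/* devlink "fw_fatal" reporter: .recover runs the full health recovery flow;
 * .dump collects a cr-dump, which only a PF is allowed to do.
 */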
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
			       void *priv_ctx,
			       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);

	return mlx5_health_try_recover(dev);
}

static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	u32 crdump_size = dev->priv.health.crdump_size;
	u32 *cr_data;
	int err;

	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	cr_data = kvmalloc(crdump_size, GFP_KERNEL);
	if (!cr_data)
		return -ENOMEM;
	err = mlx5_crdump_collect(dev, cr_data);
	if (err)
		goto free_data;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			goto free_data;
	}

	err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);

free_data:
	kvfree(cr_data);
	return err;
}

static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;

	health = container_of(work, struct mlx5_core_health, fatal_report_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
		mlx5_core_err(dev, "health works are not permitted at this stage\n");
		mutex_unlock(&dev->intf_state_mutex);
		return;
	}
	mutex_unlock(&dev->intf_state_mutex);
	enter_error_state(dev, false);
	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
		if (mlx5_health_try_recover(dev))
			mlx5_core_err(dev, "health recovery failed\n");
		return;
	}
	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	devlink_health_report(health->fw_fatal_reporter,
			      "FW fatal error reported", &fw_reporter_ctx);
}

static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
		.name = "fw_fatal",
		.recover = mlx5_fw_fatal_reporter_recover,
		.dump = mlx5_fw_fatal_reporter_dump,
};

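/* Grace period for the fw_fatal reporter's automatic recovery:
 * 1200000 msecs, i.e. 20 minutes.
 */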
#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct devlink *devlink = priv_to_devlink(dev);

	health->fw_reporter =
		devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
					       0, dev);
	if (IS_ERR(health->fw_reporter))
		mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
			       PTR_ERR(health->fw_reporter));

	health->fw_fatal_reporter =
		devlink_health_reporter_create(devlink,
					       &mlx5_fw_fatal_reporter_ops,
					       MLX5_REPORTER_FW_GRACEFUL_PERIOD,
					       dev);
	if (IS_ERR(health->fw_fatal_reporter))
		mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
			       PTR_ERR(health->fw_fatal_reporter));
}

static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	if (!IS_ERR_OR_NULL(health->fw_reporter))
		devlink_health_reporter_destroy(health->fw_reporter);

	if (!IS_ERR_OR_NULL(health->fw_fatal_reporter))
		devlink_health_reporter_destroy(health->fw_fatal_reporter);
}

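/* Re-arm the health timer MLX5_HEALTH_POLL_INTERVAL from now, plus up to one
 * second (HZ jiffies) of random jitter so the polls are not perfectly
 * periodic.
 */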
static unsigned long get_next_poll_jiffies(void)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

	return next;
}

void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->fatal_report_work);
	else
		mlx5_core_err(dev, "new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

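/* Timer callback: check the fatal sensors first; on a new fatal error, record
 * it, print the health buffer, move the device to INTERNAL_ERROR and queue the
 * fatal report work without re-arming the timer. Otherwise track the FW health
 * counter and syndrome, queue the non-fatal report work when the counter
 * stalls or the syndrome changes, and re-arm the timer.
 */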
static void poll_health(struct timer_list *t)
{
	struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 fatal_error;
	u8 prev_synd;
	u32 count;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	fatal_error = mlx5_health_check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_trigger_health_work(dev);
		return;
	}

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
		queue_work(health->wq, &health->report_work);
	}

	prev_synd = health->synd;
	health->synd = ioread8(&h->synd);
	if (health->synd && health->synd != prev_synd)
		queue_work(health->wq, &health->report_work);

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	timer_setup(&health->timer, poll_health, 0);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
	add_timer(&health->timer);
}

void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	if (disable_health) {
		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	del_timer_sync(&health->timer);
}

void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_work_sync(&health->report_work);
	cancel_work_sync(&health->fatal_report_work);
}

void mlx5_health_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	flush_workqueue(health->wq);
}

void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
	mlx5_fw_reporters_destroy(dev);
}

int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char *name;

	mlx5_fw_reporters_create(dev);

	health = &dev->priv.health;
	name = kmalloc(64, GFP_KERNEL);
	if (!name)
		goto out_err;

	strcpy(name, "mlx5_health");
	strcat(name, dev_name(dev->device));
	health->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!health->wq)
		goto out_err;
	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);

	return 0;

out_err:
	mlx5_fw_reporters_destroy(dev);
	return -ENOMEM;
}