// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Mellanox Technologies.

#include "health.h"
#include "params.h"
#include "txrx.h"

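/* Read the current hardware state of an RQ via the QUERY_RQ command. */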
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
	int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
	void *out;
	void *rqc;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev, rqn, out);
	if (err)
		goto out;

	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
	*state = MLX5_GET(rqc, rqc, state);

out:
	kvfree(out);
	return err;
}

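/* Poll until the ICOSQ consumer counter catches up with the producer
 * counter, i.e. all outstanding ICOSQ WQEs have completed, or until
 * MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC expires.
 */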
static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
{
	unsigned long exp_time = jiffies +
				 msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);

	while (time_before(jiffies, exp_time)) {
		if (icosq->cc == icosq->pc)
			return 0;

		msleep(20);
	}

	netdev_err(icosq->channel->netdev,
		   "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n",
		   icosq->sqn, icosq->cc, icosq->pc);

	return -ETIMEDOUT;
}

static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
{
	WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n",
		  icosq->sqn, icosq->cc, icosq->pc);
	icosq->cc = 0;
	icosq->pc = 0;
}

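/* ICOSQ CQE error recovery: if the SQ is in error state, quiesce the
 * paired RQ, wait for the ICOSQ to drain, move the SQ back to ready,
 * reset its counters, drop in-progress RX descriptors and re-activate
 * both queues.
 */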
static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct mlx5e_icosq *icosq;
	struct net_device *dev;
	struct mlx5e_rq *rq;
	u8 state;
	int err;

	icosq = ctx;
	rq = &icosq->channel->rq;
	mdev = icosq->channel->mdev;
	dev = icosq->channel->netdev;
	err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n",
			   icosq->sqn, err);
		goto out;
	}

	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	mlx5e_deactivate_rq(rq);
	err = mlx5e_wait_for_icosq_flush(icosq);
	if (err)
		goto out;

	mlx5e_deactivate_icosq(icosq);

	/* At this point, both the rq and the icosq are disabled */

	err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn);
	if (err)
		goto out;

	mlx5e_reset_icosq_cc_pc(icosq);
	mlx5e_free_rx_in_progress_descs(rq);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
	mlx5e_activate_icosq(icosq);
	mlx5e_activate_rq(rq);

	rq->stats->recover++;
	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
	return err;
}

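/* Move an RQ from its current state back to ready via the reset state. */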
static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
{
	struct net_device *dev = rq->netdev;
	int err;

	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
	if (err) {
		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
		return err;
	}
	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err) {
		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
		return err;
	}

	return 0;
}

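/* RQ CQE error recovery: drain the RQ, move it from error back to ready
 * and re-activate it.
 */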
static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
{
	struct mlx5e_rq *rq = ctx;
	int err;

	mlx5e_deactivate_rq(rq);
	mlx5e_free_rx_descs(rq);

	err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
	if (err)
		goto out;

	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
	mlx5e_activate_rq(rq);
	rq->stats->recover++;
	return 0;
out:
	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
	return err;
}

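/* RX timeout recovery: try to recover missed completions by polling the
 * channel's completion EQ; on failure, mark the ICOSQ as disabled.
 */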
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
	struct mlx5e_icosq *icosq;
	struct mlx5_eq_comp *eq;
	struct mlx5e_rq *rq;
	int err;

	rq = ctx;
	icosq = &rq->channel->icosq;
	eq = rq->cq.mcq.eq;
	err = mlx5e_health_channel_eq_recover(eq, rq->channel);
	if (err)
		clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);

	return err;
}

static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	return err_ctx->recover(err_ctx->ctx);
}

static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) :
			 mlx5e_health_recover_channels(priv);
}

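/* Format ICOSQ and its CQ state into the diagnose fmsg. */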
static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
					 struct devlink_fmsg *fmsg)
{
	int err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
	if (err)
		return err;

	err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "WQE size",
					mlx5_wq_cyc_get_size(&icosq->wq));
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

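/* Emit per-RQ diagnostics: HW/SW state, WQ counters, CQ, EQ and the
 * associated ICOSQ.
 */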
static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
						   struct devlink_fmsg *fmsg)
{
	struct mlx5e_priv *priv = rq->channel->priv;
	struct mlx5e_icosq *icosq;
	u8 icosq_hw_state;
	u16 wqe_counter;
	int wqes_sz;
	u8 hw_state;
	u16 wq_head;
	int err;

	icosq = &rq->channel->icosq;
	err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
	if (err)
		return err;

	err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
	if (err)
		return err;

	wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
	wq_head = mlx5e_rqwq_get_head(rq);
	wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);

	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
	if (err)
		return err;

	err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
	if (err)
		return err;

	err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
	if (err)
		return err;

	err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
	if (err)
		return err;

	err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
	if (err)
		return err;

	err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
	if (err)
		return err;

	err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
	if (err)
		return err;

	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;

	return 0;
}

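/* Diagnose callback: report the common RQ configuration followed by a
 * per-channel RQ entry. Only meaningful while the netdev is opened.
 */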
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
				      struct devlink_fmsg *fmsg,
				      struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_rq *generic_rq;
	u32 rq_stride, rq_sz;
	int i, err = 0;

	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	generic_rq = &priv->channels.c[0]->rq;
	rq_sz = mlx5e_rqwq_get_size(generic_rq);
	rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
	if (err)
		goto unlock;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
	if (err)
		goto unlock;

	err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
	if (err)
		goto unlock;

	err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
	if (err)
		goto unlock;

	err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
	if (err)
		goto unlock;

	err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg);
	if (err)
		goto unlock;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		goto unlock;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		goto unlock;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
	if (err)
		goto unlock;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_rq *rq = &priv->channels.c[i]->rq;

		err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
		if (err)
			goto unlock;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		goto unlock;
unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

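/* Dump callback for ICOSQ errors: capture the SX slice, the SQ QPC and
 * the send buffer through the resource dump interface.
 */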
static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
					void *ctx)
{
	struct mlx5e_icosq *icosq = ctx;
	struct mlx5_rsc_key key = {};
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = icosq->sqn;
	key.num_of_obj1 = 1;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

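/* Dump callback for RQ errors: capture the RX slice, the RQ QPC and the
 * receive buffer through the resource dump interface.
 */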
static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_rq *rq = ctx;
	int err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = rq->rqn;
	key.num_of_obj1 = 1;

	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
	if (err)
		return err;

	key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}

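/* Default dump (no error context): capture the RX slice and dump every
 * channel's RQ.
 */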
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
					  struct devlink_fmsg *fmsg)
{
	struct mlx5_rsc_key key = {};
	int i, err;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
	if (err)
		return err;

	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
	err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	if (err)
		return err;

	err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
	if (err)
		return err;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_rq *rq = &priv->channels.c[i]->rq;

		err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
		if (err)
			return err;
	}

	return devlink_fmsg_arr_pair_nest_end(fmsg);
}

static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}

static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_rx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
			 mlx5e_rx_reporter_dump_all_rqs(priv, fmsg);
}

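/* Report an RX completion timeout to the devlink health reporter. */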
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *icosq = &rq->channel->icosq;
	struct mlx5e_priv *priv = rq->channel->priv;
	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
	struct mlx5e_err_ctx err_ctx = {};

	err_ctx.ctx = rq;
	err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
	err_ctx.dump = mlx5e_rx_reporter_dump_rq;
	snprintf(err_str, sizeof(err_str),
		 "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
		 icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);

	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}

void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = rq->channel->priv;
	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
	struct mlx5e_err_ctx err_ctx = {};

	err_ctx.ctx = rq;
	err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
	err_ctx.dump = mlx5e_rx_reporter_dump_rq;
	snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);

	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}

void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
{
	struct mlx5e_priv *priv = icosq->channel->priv;
	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
	struct mlx5e_err_ctx err_ctx = {};

	err_ctx.ctx = icosq;
	err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
	err_ctx.dump = mlx5e_rx_reporter_dump_icosq;
	snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);

	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}

static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
	.name = "rx",
	.recover = mlx5e_rx_reporter_recover,
	.diagnose = mlx5e_rx_reporter_diagnose,
	.dump = mlx5e_rx_reporter_dump,
};

#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500

void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
	struct devlink_health_reporter *reporter;

	reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops,
						       MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
	if (IS_ERR(reporter)) {
		netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
			    PTR_ERR(reporter));
		return;
	}
	priv->rx_reporter = reporter;
}

void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
{
	if (!priv->rx_reporter)
		return;

	devlink_port_health_reporter_destroy(priv->rx_reporter);
}