1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <linux/bitfield.h>
9
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13
14 #define DRV_NAME "octeontx2-af"
15
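/* Helpers to open/close one named object in a devlink formatted message:
 * every reporter dump below is emitted as a pair whose value is an object
 * carrying the decoded interrupt bits.
 */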
16 static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
17 {
18 int err;
19
20 err = devlink_fmsg_pair_nest_start(fmsg, name);
21 if (err)
22 return err;
23
24 return devlink_fmsg_obj_nest_start(fmsg);
25 }
26
27 static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
28 {
29 int err;
30
31 err = devlink_fmsg_obj_nest_end(fmsg);
32 if (err)
33 return err;
34
35 return devlink_fmsg_pair_nest_end(fmsg);
36 }
37
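/* Common IRQ request helper for the AF health interrupts: store the vector
 * name in rvu->irq_name, request the MSI-X vector with the devlink context
 * as cookie and track the allocation so the unregister paths can free it.
 */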
38 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
39 const char *name, irq_handler_t fn)
40 {
41 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
42 int rc;
43
44 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
45 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
46 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
47 if (rc)
48 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
49 else
50 rvu->irq_allocated[offset] = true;
51
52 return rvu->irq_allocated[offset];
53 }
54
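/* NIX block error handling. Each interrupt class (RVU, GEN, ERR, RAS) has a
 * hard IRQ handler that latches the cause register into the shared event
 * context and masks the source, plus a work item that hands the event to the
 * matching devlink health reporter; reporting is deferred to a workqueue
 * since devlink_health_report() cannot be called from hard IRQ context.
 */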
55 static void rvu_nix_intr_work(struct work_struct *work)
56 {
57 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
58
59 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
60 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
61 "NIX_AF_RVU Error",
62 rvu_nix_health_reporter->nix_event_ctx);
63 }
64
65 static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
66 {
67 struct rvu_nix_event_ctx *nix_event_context;
68 struct rvu_devlink *rvu_dl = rvu_irq;
69 struct rvu *rvu;
70 int blkaddr;
71 u64 intr;
72
73 rvu = rvu_dl->rvu;
74 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
75 if (blkaddr < 0)
76 return IRQ_NONE;
77
78 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
79 intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
80 nix_event_context->nix_af_rvu_int = intr;
81
82 /* Clear interrupts */
83 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
84 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
85 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
86
87 return IRQ_HANDLED;
88 }
89
90 static void rvu_nix_gen_work(struct work_struct *work)
91 {
92 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
93
94 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
95 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
96 "NIX_AF_GEN Error",
97 rvu_nix_health_reporter->nix_event_ctx);
98 }
99
100 static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
101 {
102 struct rvu_nix_event_ctx *nix_event_context;
103 struct rvu_devlink *rvu_dl = rvu_irq;
104 struct rvu *rvu;
105 int blkaddr;
106 u64 intr;
107
108 rvu = rvu_dl->rvu;
109 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
110 if (blkaddr < 0)
111 return IRQ_NONE;
112
113 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
114 intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
115 nix_event_context->nix_af_rvu_gen = intr;
116
117 /* Clear interrupts */
118 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
119 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
120 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
121
122 return IRQ_HANDLED;
123 }
124
125 static void rvu_nix_err_work(struct work_struct *work)
126 {
127 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
128
129 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
130 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
131 "NIX_AF_ERR Error",
132 rvu_nix_health_reporter->nix_event_ctx);
133 }
134
135 static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
136 {
137 struct rvu_nix_event_ctx *nix_event_context;
138 struct rvu_devlink *rvu_dl = rvu_irq;
139 struct rvu *rvu;
140 int blkaddr;
141 u64 intr;
142
143 rvu = rvu_dl->rvu;
144 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
145 if (blkaddr < 0)
146 return IRQ_NONE;
147
148 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
149 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
150 nix_event_context->nix_af_rvu_err = intr;
151
152 /* Clear interrupts */
153 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
154 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
155 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
156
157 return IRQ_HANDLED;
158 }
159
160 static void rvu_nix_ras_work(struct work_struct *work)
161 {
162 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
163
164 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
165 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
166 "NIX_AF_RAS Error",
167 rvu_nix_health_reporter->nix_event_ctx);
168 }
169
170 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
171 {
172 struct rvu_nix_event_ctx *nix_event_context;
173 struct rvu_devlink *rvu_dl = rvu_irq;
174 struct rvu *rvu;
175 int blkaddr;
176 u64 intr;
177
178 rvu = rvu_dl->rvu;
179 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
180 if (blkaddr < 0)
181 return IRQ_NONE;
182
183 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
184 intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
185 nix_event_context->nix_af_rvu_ras = intr;
186
187 /* Clear interrupts */
188 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
189 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
190 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
191
192 return IRQ_HANDLED;
193 }
194
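/* Mask all NIX AF error interrupt sources and free whatever vectors were
 * requested by rvu_nix_register_interrupts().
 */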
195 static void rvu_nix_unregister_interrupts(struct rvu *rvu)
196 {
197 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
198 int offs, i, blkaddr;
199
200 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
201 if (blkaddr < 0)
202 return;
203
204 offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
205 if (!offs)
206 return;
207
208 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
209 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
210 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
211 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
212
213 if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
214 free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
215 rvu_dl);
216 rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
217 }
218
219 for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
220 if (rvu->irq_allocated[offs + i]) {
221 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
222 rvu->irq_allocated[offs + i] = false;
223 }
224 }
225
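/* The NIX AF MSI-X vector base is read from NIX_PRIV_AF_INT_CFG; each
 * interrupt source is then registered and unmasked through its _ENA_W1S
 * register. Any failure unwinds everything registered so far.
 */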
226 static int rvu_nix_register_interrupts(struct rvu *rvu)
227 {
228 int blkaddr, base;
229 bool rc;
230
231 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
232 if (blkaddr < 0)
233 return blkaddr;
234
235 /* Get NIX AF MSIX vectors offset. */
236 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
237 if (!base) {
238 dev_warn(rvu->dev,
239 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
240 blkaddr - BLKADDR_NIX0);
241 return 0;
242 }
243 /* Register and enable NIX_AF_RVU_INT interrupt */
244 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
245 "NIX_AF_RVU_INT",
246 rvu_nix_af_rvu_intr_handler);
247 if (!rc)
248 goto err;
249 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
250
251 /* Register and enable NIX_AF_GEN_INT interrupt */
252 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
253 "NIX_AF_GEN_INT",
254 rvu_nix_af_rvu_gen_handler);
255 if (!rc)
256 goto err;
257 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
258
259 /* Register and enable NIX_AF_ERR_INT interrupt */
260 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
261 "NIX_AF_ERR_INT",
262 rvu_nix_af_rvu_err_handler);
263 if (!rc)
264 goto err;
265 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
266
267 /* Register and enable NIX_AF_RAS interrupt */
268 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
269 "NIX_AF_RAS",
270 rvu_nix_af_rvu_ras_handler);
271 if (!rc)
272 goto err;
273 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
274
275 return 0;
276 err:
277 rvu_nix_unregister_interrupts(rvu);
278 return rc;
279 }
280
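/* Decode a latched NIX interrupt cause register into human readable strings
 * for the devlink health dump output.
 */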
281 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
282 enum nix_af_rvu_health health_reporter)
283 {
284 struct rvu_nix_event_ctx *nix_event_context;
285 u64 intr_val;
286 int err;
287
288 nix_event_context = ctx;
289 switch (health_reporter) {
290 case NIX_AF_RVU_INTR:
291 intr_val = nix_event_context->nix_af_rvu_int;
292 err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
293 if (err)
294 return err;
295 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
296 nix_event_context->nix_af_rvu_int);
297 if (err)
298 return err;
299 if (intr_val & BIT_ULL(0)) {
300 err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
301 if (err)
302 return err;
303 }
304 err = rvu_report_pair_end(fmsg);
305 if (err)
306 return err;
307 break;
308 case NIX_AF_RVU_GEN:
309 intr_val = nix_event_context->nix_af_rvu_gen;
310 err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
311 if (err)
312 return err;
313 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
314 nix_event_context->nix_af_rvu_gen);
315 if (err)
316 return err;
317 if (intr_val & BIT_ULL(0)) {
318 err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
319 if (err)
320 return err;
321 }
322 if (intr_val & BIT_ULL(1)) {
323 err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
324 if (err)
325 return err;
326 }
327 if (intr_val & BIT_ULL(4)) {
328 err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
329 if (err)
330 return err;
331 }
332 err = rvu_report_pair_end(fmsg);
333 if (err)
334 return err;
335 break;
336 case NIX_AF_RVU_ERR:
337 intr_val = nix_event_context->nix_af_rvu_err;
338 err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
339 if (err)
340 return err;
341 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
342 nix_event_context->nix_af_rvu_err);
343 if (err)
344 return err;
345 if (intr_val & BIT_ULL(14)) {
346 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
347 if (err)
348 return err;
349 }
350 if (intr_val & BIT_ULL(13)) {
351 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
352 if (err)
353 return err;
354 }
355 if (intr_val & BIT_ULL(12)) {
356 err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
357 if (err)
358 return err;
359 }
360 if (intr_val & BIT_ULL(6)) {
361 err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
362 if (err)
363 return err;
364 }
365 if (intr_val & BIT_ULL(5)) {
366 err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
367 if (err)
368 return err;
369 }
370 if (intr_val & BIT_ULL(4)) {
371 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
372 if (err)
373 return err;
374 }
375 if (intr_val & BIT_ULL(3)) {
376 err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
377 if (err)
378 return err;
379 }
380 if (intr_val & BIT_ULL(2)) {
381 err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
382 if (err)
383 return err;
384 }
385 if (intr_val & BIT_ULL(1)) {
386 err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
387 if (err)
388 return err;
389 }
390 if (intr_val & BIT_ULL(0)) {
391 err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
392 if (err)
393 return err;
394 }
395 err = rvu_report_pair_end(fmsg);
396 if (err)
397 return err;
398 break;
399 case NIX_AF_RVU_RAS:
400 intr_val = nix_event_context->nix_af_rvu_ras;
401 err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
402 if (err)
403 return err;
404 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
405 nix_event_context->nix_af_rvu_ras);
406 if (err)
407 return err;
408 err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
409 if (err)
410 return err;
411 if (intr_val & BIT_ULL(34)) {
412 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
413 if (err)
414 return err;
415 }
416 if (intr_val & BIT_ULL(33)) {
417 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
418 if (err)
419 return err;
420 }
421 if (intr_val & BIT_ULL(32)) {
422 err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
423 if (err)
424 return err;
425 }
426 if (intr_val & BIT_ULL(4)) {
427 err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
428 if (err)
429 return err;
430 }
431 if (intr_val & BIT_ULL(3)) {
432 err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
434 if (err)
435 return err;
436 }
437 if (intr_val & BIT_ULL(2)) {
438 err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
439 if (err)
440 return err;
441 }
442 if (intr_val & BIT_ULL(1)) {
443 err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
444 if (err)
445 return err;
446 }
447 if (intr_val & BIT_ULL(0)) {
448 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
449 if (err)
450 return err;
451 }
452 err = rvu_report_pair_end(fmsg);
453 if (err)
454 return err;
455 break;
456 default:
457 return -EINVAL;
458 }
459
460 return 0;
461 }
462
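/* Reporter callbacks. The dump callback runs either from
 * devlink_health_report() with the event context that triggered the report
 * (ctx != NULL) or from a user requested dump (ctx == NULL), in which case
 * the last latched state is shown. The recover callback re-enables the
 * interrupt that was masked in the corresponding IRQ handler.
 */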
463 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
464 struct devlink_fmsg *fmsg, void *ctx,
465 struct netlink_ext_ack *netlink_extack)
466 {
467 struct rvu *rvu = devlink_health_reporter_priv(reporter);
468 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
469 struct rvu_nix_event_ctx *nix_ctx;
470
471 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
472
473 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
474 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
475 }
476
477 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
478 void *ctx, struct netlink_ext_ack *netlink_extack)
479 {
480 struct rvu *rvu = devlink_health_reporter_priv(reporter);
481 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
482 int blkaddr;
483
484 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
485 if (blkaddr < 0)
486 return blkaddr;
487
488 if (nix_event_ctx->nix_af_rvu_int)
489 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
490
491 return 0;
492 }
493
494 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
495 struct devlink_fmsg *fmsg, void *ctx,
496 struct netlink_ext_ack *netlink_extack)
497 {
498 struct rvu *rvu = devlink_health_reporter_priv(reporter);
499 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
500 struct rvu_nix_event_ctx *nix_ctx;
501
502 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
503
504 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
505 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
506 }
507
508 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
509 void *ctx, struct netlink_ext_ack *netlink_extack)
510 {
511 struct rvu *rvu = devlink_health_reporter_priv(reporter);
512 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
513 int blkaddr;
514
515 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
516 if (blkaddr < 0)
517 return blkaddr;
518
519 if (nix_event_ctx->nix_af_rvu_gen)
520 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
521
522 return 0;
523 }
524
525 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
526 struct devlink_fmsg *fmsg, void *ctx,
527 struct netlink_ext_ack *netlink_extack)
528 {
529 struct rvu *rvu = devlink_health_reporter_priv(reporter);
530 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
531 struct rvu_nix_event_ctx *nix_ctx;
532
533 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
534
535 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
536 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
537 }
538
539 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
540 void *ctx, struct netlink_ext_ack *netlink_extack)
541 {
542 struct rvu *rvu = devlink_health_reporter_priv(reporter);
543 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
544 int blkaddr;
545
546 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
547 if (blkaddr < 0)
548 return blkaddr;
549
550 if (nix_event_ctx->nix_af_rvu_err)
551 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
552
553 return 0;
554 }
555
556 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
557 struct devlink_fmsg *fmsg, void *ctx,
558 struct netlink_ext_ack *netlink_extack)
559 {
560 struct rvu *rvu = devlink_health_reporter_priv(reporter);
561 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
562 struct rvu_nix_event_ctx *nix_ctx;
563
564 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
565
566 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
567 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
568 }
569
570 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
571 void *ctx, struct netlink_ext_ack *netlink_extack)
572 {
573 struct rvu *rvu = devlink_health_reporter_priv(reporter);
574 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
575 int blkaddr;
576
577 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
578 if (blkaddr < 0)
579 return blkaddr;
580
581 if (nix_event_ctx->nix_af_rvu_ras)
582 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
583
584 return 0;
585 }
586
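/* RVU_REPORTERS(name) supplies the devlink_health_reporter_ops instance
 * (e.g. rvu_hw_nix_intr_reporter_ops) that ties the _dump/_recover callbacks
 * above to a reporter exposed under that name, so a dump can be requested
 * from user space with, for example:
 *
 *   devlink health dump show pci/0002:01:00.0 reporter hw_nix_intr
 *
 * (the PCI address above is only illustrative).
 */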
587 RVU_REPORTERS(hw_nix_intr);
588 RVU_REPORTERS(hw_nix_gen);
589 RVU_REPORTERS(hw_nix_err);
590 RVU_REPORTERS(hw_nix_ras);
591
592 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
593
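/* Allocate the NIX event context, create one devlink health reporter per
 * interrupt class and set up the workqueue plus work items used to defer
 * reporting out of hard IRQ context.
 */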
594 static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
595 {
596 struct rvu_nix_health_reporters *rvu_reporters;
597 struct rvu_nix_event_ctx *nix_event_context;
598 struct rvu *rvu = rvu_dl->rvu;
599
600 rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
601 if (!rvu_reporters)
602 return -ENOMEM;
603
604 rvu_dl->rvu_nix_health_reporter = rvu_reporters;
605 nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
606 if (!nix_event_context)
607 return -ENOMEM;
608
609 rvu_reporters->nix_event_ctx = nix_event_context;
610 rvu_reporters->rvu_hw_nix_intr_reporter =
611 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
612 if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
613 dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
614 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
615 return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
616 }
617
618 rvu_reporters->rvu_hw_nix_gen_reporter =
619 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
620 if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
621 dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
622 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
623 return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
624 }
625
626 rvu_reporters->rvu_hw_nix_err_reporter =
627 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
628 if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
629 dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
630 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
631 return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
632 }
633
634 rvu_reporters->rvu_hw_nix_ras_reporter =
635 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
636 if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
637 dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
638 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
639 return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
640 }
641
642 rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
643 if (!rvu_dl->devlink_wq)
644 return -ENOMEM;
645
646 INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
647 INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
648 INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
649 INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
650
651 return 0;
652 }
653
654 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
655 {
656 struct rvu *rvu = rvu_dl->rvu;
657 int err;
658
659 err = rvu_nix_register_reporters(rvu_dl);
660 if (err) {
661 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
662 err);
663 return err;
664 }
665 rvu_nix_register_interrupts(rvu);
666
667 return 0;
668 }
669
670 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
671 {
672 struct rvu_nix_health_reporters *nix_reporters;
673 struct rvu *rvu = rvu_dl->rvu;
674
675 nix_reporters = rvu_dl->rvu_nix_health_reporter;
676
677 if (!nix_reporters->rvu_hw_nix_ras_reporter)
678 return;
679 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
680 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
681
682 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
683 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
684
685 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
686 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
687
688 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
689 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
690
691 rvu_nix_unregister_interrupts(rvu);
692 kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
693 kfree(rvu_dl->rvu_nix_health_reporter);
694 }
695
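/* NPA block error handling mirrors the NIX flow above: latch the cause
 * register in hard IRQ context, mask the source and let a work item feed the
 * matching devlink health reporter.
 */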
696 static void rvu_npa_intr_work(struct work_struct *work)
697 {
698 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
699
700 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
701 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
702 "NPA_AF_RVU Error",
703 rvu_npa_health_reporter->npa_event_ctx);
704 }
705
706 static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
707 {
708 struct rvu_npa_event_ctx *npa_event_context;
709 struct rvu_devlink *rvu_dl = rvu_irq;
710 struct rvu *rvu;
711 int blkaddr;
712 u64 intr;
713
714 rvu = rvu_dl->rvu;
715 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
716 if (blkaddr < 0)
717 return IRQ_NONE;
718
719 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
720 intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
721 npa_event_context->npa_af_rvu_int = intr;
722
723 /* Clear interrupts */
724 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
725 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
726 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
727
728 return IRQ_HANDLED;
729 }
730
731 static void rvu_npa_gen_work(struct work_struct *work)
732 {
733 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
734
735 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
736 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
737 "NPA_AF_GEN Error",
738 rvu_npa_health_reporter->npa_event_ctx);
739 }
740
741 static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
742 {
743 struct rvu_npa_event_ctx *npa_event_context;
744 struct rvu_devlink *rvu_dl = rvu_irq;
745 struct rvu *rvu;
746 int blkaddr;
747 u64 intr;
748
749 rvu = rvu_dl->rvu;
750 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
751 if (blkaddr < 0)
752 return IRQ_NONE;
753
754 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
755 intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
756 npa_event_context->npa_af_rvu_gen = intr;
757
758 /* Clear interrupts */
759 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
760 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
761 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
762
763 return IRQ_HANDLED;
764 }
765
766 static void rvu_npa_err_work(struct work_struct *work)
767 {
768 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
769
770 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
771 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
772 "NPA_AF_ERR Error",
773 rvu_npa_health_reporter->npa_event_ctx);
774 }
775
776 static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
777 {
778 struct rvu_npa_event_ctx *npa_event_context;
779 struct rvu_devlink *rvu_dl = rvu_irq;
780 struct rvu *rvu;
781 int blkaddr;
782 u64 intr;
783
784 rvu = rvu_dl->rvu;
785 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
786 if (blkaddr < 0)
787 return IRQ_NONE;
788 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
789 intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
790 npa_event_context->npa_af_rvu_err = intr;
791
792 /* Clear interrupts */
793 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
794 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
795 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
796
797 return IRQ_HANDLED;
798 }
799
800 static void rvu_npa_ras_work(struct work_struct *work)
801 {
802 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
803
804 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
805 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
806 "HW NPA_AF_RAS Error reported",
807 rvu_npa_health_reporter->npa_event_ctx);
808 }
809
810 static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
811 {
812 struct rvu_npa_event_ctx *npa_event_context;
813 struct rvu_devlink *rvu_dl = rvu_irq;
814 struct rvu *rvu;
815 int blkaddr;
816 u64 intr;
817
818 rvu = rvu_dl->rvu;
819 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
820 if (blkaddr < 0)
821 return IRQ_NONE;
822
823 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
824 intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
825 npa_event_context->npa_af_rvu_ras = intr;
826
827 /* Clear interrupts */
828 rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
829 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
830 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
831
832 return IRQ_HANDLED;
833 }
834
835 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
836 {
837 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
838 int i, offs, blkaddr;
839 u64 reg;
840
841 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
842 if (blkaddr < 0)
843 return;
844
845 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
846 offs = reg & 0x3FF;
847
848 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
849 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
850 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
851 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
852
853 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
854 if (rvu->irq_allocated[offs + i]) {
855 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
856 rvu->irq_allocated[offs + i] = false;
857 }
858 }
859
860 static int rvu_npa_register_interrupts(struct rvu *rvu)
861 {
862 int blkaddr, base;
863 bool rc;
864
865 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
866 if (blkaddr < 0)
867 return blkaddr;
868
869 /* Get NPA AF MSIX vectors offset. */
870 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
871 if (!base) {
872 dev_warn(rvu->dev,
873 "Failed to get NPA_AF_INT vector offsets\n");
874 return 0;
875 }
876
877 /* Register and enable NPA_AF_RVU_INT interrupt */
878 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
879 "NPA_AF_RVU_INT",
880 rvu_npa_af_rvu_intr_handler);
881 if (!rc)
882 goto err;
883 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
884
885 /* Register and enable NPA_AF_GEN_INT interrupt */
886 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
887 "NPA_AF_RVU_GEN",
888 rvu_npa_af_gen_intr_handler);
889 if (!rc)
890 goto err;
891 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
892
893 /* Register and enable NPA_AF_ERR_INT interrupt */
894 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
895 "NPA_AF_ERR_INT",
896 rvu_npa_af_err_intr_handler);
897 if (!rc)
898 goto err;
899 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
900
901 /* Register and enable NPA_AF_RAS interrupt */
902 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
903 "NPA_AF_RAS",
904 rvu_npa_af_ras_intr_handler);
905 if (!rc)
906 goto err;
907 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
908
909 return 0;
910 err:
911 rvu_npa_unregister_interrupts(rvu);
912 return rc;
913 }
914
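/* NPA_AF_GEN_INT additionally carries per-requestor "free disabled"
 * (bits 15:0) and "alloc disabled" (bits 31:16) bitmaps, which are decoded
 * below with FIELD_GET() into one line per NPA input queue.
 */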
915 static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
916 enum npa_af_rvu_health health_reporter)
917 {
918 struct rvu_npa_event_ctx *npa_event_context;
919 unsigned int alloc_dis, free_dis;
920 u64 intr_val;
921 int err;
922
923 npa_event_context = ctx;
924 switch (health_reporter) {
925 case NPA_AF_RVU_GEN:
926 intr_val = npa_event_context->npa_af_rvu_gen;
927 err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
928 if (err)
929 return err;
930 err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
931 npa_event_context->npa_af_rvu_gen);
932 if (err)
933 return err;
934 if (intr_val & BIT_ULL(32)) {
935 err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
936 if (err)
937 return err;
938 }
939
940 free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
941 if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
942 err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
943 if (err)
944 return err;
945 }
946 if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
947 err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled TX");
948 if (err)
949 return err;
950 }
951 if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
952 err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
953 if (err)
954 return err;
955 }
956 if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
957 err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled TX");
958 if (err)
959 return err;
960 }
961 if (free_dis & BIT(NPA_INPQ_SSO)) {
962 err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
963 if (err)
964 return err;
965 }
966 if (free_dis & BIT(NPA_INPQ_TIM)) {
967 err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
968 if (err)
969 return err;
970 }
971 if (free_dis & BIT(NPA_INPQ_DPI)) {
972 err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
973 if (err)
974 return err;
975 }
976 if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
977 err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
978 if (err)
979 return err;
980 }
981
982 alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
983 if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
984 err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
985 if (err)
986 return err;
987 }
988 if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
989 err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled TX");
990 if (err)
991 return err;
992 }
993 if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
994 err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
995 if (err)
996 return err;
997 }
998 if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
999 err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled TX");
1000 if (err)
1001 return err;
1002 }
1003 if (alloc_dis & BIT(NPA_INPQ_SSO)) {
1004 err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
1005 if (err)
1006 return err;
1007 }
1008 if (alloc_dis & BIT(NPA_INPQ_TIM)) {
1009 err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
1010 if (err)
1011 return err;
1012 }
1013 if (alloc_dis & BIT(NPA_INPQ_DPI)) {
1014 err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
1015 if (err)
1016 return err;
1017 }
1018 if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
1019 err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
1020 if (err)
1021 return err;
1022 }
1023 err = rvu_report_pair_end(fmsg);
1024 if (err)
1025 return err;
1026 break;
1027 case NPA_AF_RVU_ERR:
1028 err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
1029 if (err)
1030 return err;
1031 err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
1032 npa_event_context->npa_af_rvu_err);
1033 if (err)
1034 return err;
1035
1036 if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
1037 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
1038 if (err)
1039 return err;
1040 }
1041 if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
1042 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
1043 if (err)
1044 return err;
1045 }
1046 if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
1047 err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
1048 if (err)
1049 return err;
1050 }
1051 err = rvu_report_pair_end(fmsg);
1052 if (err)
1053 return err;
1054 break;
1055 case NPA_AF_RVU_RAS:
1056 err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
1057 if (err)
1058 return err;
1059 err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
1060 npa_event_context->npa_af_rvu_ras);
1061 if (err)
1062 return err;
1063 if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
1064 err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
1065 if (err)
1066 return err;
1067 }
1068 if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
1069 err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
1070 if (err)
1071 return err;
1072 }
1073 if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
1074 err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
1075 if (err)
1076 return err;
1077 }
1078 err = rvu_report_pair_end(fmsg);
1079 if (err)
1080 return err;
1081 break;
1082 case NPA_AF_RVU_INTR:
1083 err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
1084 if (err)
1085 return err;
1086 err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
1087 npa_event_context->npa_af_rvu_int);
1088 if (err)
1089 return err;
1090 if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
1091 err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
1092 if (err)
1093 return err;
1094 }
1095 return rvu_report_pair_end(fmsg);
1096 default:
1097 return -EINVAL;
1098 }
1099
1100 return 0;
1101 }
1102
1103 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
1104 struct devlink_fmsg *fmsg, void *ctx,
1105 struct netlink_ext_ack *netlink_extack)
1106 {
1107 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1108 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1109 struct rvu_npa_event_ctx *npa_ctx;
1110
1111 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1112
1113 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
1114 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
1115 }
1116
1117 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
1118 void *ctx, struct netlink_ext_ack *netlink_extack)
1119 {
1120 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1121 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1122 int blkaddr;
1123
1124 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1125 if (blkaddr < 0)
1126 return blkaddr;
1127
1128 if (npa_event_ctx->npa_af_rvu_int)
1129 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
1130
1131 return 0;
1132 }
1133
1134 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
1135 struct devlink_fmsg *fmsg, void *ctx,
1136 struct netlink_ext_ack *netlink_extack)
1137 {
1138 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1139 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1140 struct rvu_npa_event_ctx *npa_ctx;
1141
1142 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1143
1144 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
1145 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
1146 }
1147
1148 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
1149 void *ctx, struct netlink_ext_ack *netlink_extack)
1150 {
1151 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1152 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1153 int blkaddr;
1154
1155 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1156 if (blkaddr < 0)
1157 return blkaddr;
1158
1159 if (npa_event_ctx->npa_af_rvu_gen)
1160 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
1161
1162 return 0;
1163 }
1164
1165 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
1166 struct devlink_fmsg *fmsg, void *ctx,
1167 struct netlink_ext_ack *netlink_extack)
1168 {
1169 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1170 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1171 struct rvu_npa_event_ctx *npa_ctx;
1172
1173 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1174
1175 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
1176 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
1177 }
1178
1179 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
1180 void *ctx, struct netlink_ext_ack *netlink_extack)
1181 {
1182 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1183 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1184 int blkaddr;
1185
1186 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1187 if (blkaddr < 0)
1188 return blkaddr;
1189
1190 if (npa_event_ctx->npa_af_rvu_err)
1191 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1192
1193 return 0;
1194 }
1195
1196 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1197 struct devlink_fmsg *fmsg, void *ctx,
1198 struct netlink_ext_ack *netlink_extack)
1199 {
1200 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1201 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1202 struct rvu_npa_event_ctx *npa_ctx;
1203
1204 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1205
1206 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1207 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1208 }
1209
1210 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1211 void *ctx, struct netlink_ext_ack *netlink_extack)
1212 {
1213 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1214 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1215 int blkaddr;
1216
1217 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1218 if (blkaddr < 0)
1219 return blkaddr;
1220
1221 if (npa_event_ctx->npa_af_rvu_ras)
1222 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1223
1224 return 0;
1225 }
1226
1227 RVU_REPORTERS(hw_npa_intr);
1228 RVU_REPORTERS(hw_npa_gen);
1229 RVU_REPORTERS(hw_npa_err);
1230 RVU_REPORTERS(hw_npa_ras);
1231
1232 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1233
1234 static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
1235 {
1236 struct rvu_npa_health_reporters *rvu_reporters;
1237 struct rvu_npa_event_ctx *npa_event_context;
1238 struct rvu *rvu = rvu_dl->rvu;
1239
1240 rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
1241 if (!rvu_reporters)
1242 return -ENOMEM;
1243
1244 rvu_dl->rvu_npa_health_reporter = rvu_reporters;
1245 npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
1246 if (!npa_event_context)
1247 return -ENOMEM;
1248
1249 rvu_reporters->npa_event_ctx = npa_event_context;
1250 rvu_reporters->rvu_hw_npa_intr_reporter =
1251 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
1252 if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
1253 dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
1254 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
1255 return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
1256 }
1257
1258 rvu_reporters->rvu_hw_npa_gen_reporter =
1259 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
1260 if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
1261 dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
1262 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
1263 return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
1264 }
1265
1266 rvu_reporters->rvu_hw_npa_err_reporter =
1267 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
1268 if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
1269 dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
1270 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
1271 return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
1272 }
1273
1274 rvu_reporters->rvu_hw_npa_ras_reporter =
1275 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
1276 if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
1277 dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
1278 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
1279 return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
1280 }
1281
1282 rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
1283 if (!rvu_dl->devlink_wq)
1284 return -ENOMEM;
1285
1286 INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
1287 INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
1288 INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
1289 INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
1290
1291 return 0;
1292 }
1293
1294 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1295 {
1296 struct rvu *rvu = rvu_dl->rvu;
1297 int err;
1298
1299 err = rvu_npa_register_reporters(rvu_dl);
1300 if (err) {
1301 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1302 err);
1303 return err;
1304 }
1305 rvu_npa_register_interrupts(rvu);
1306
1307 return 0;
1308 }
1309
1310 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1311 {
1312 struct rvu_npa_health_reporters *npa_reporters;
1313 struct rvu *rvu = rvu_dl->rvu;
1314
1315 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1316
1317 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1318 return;
1319 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1320 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1321
1322 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1323 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1324
1325 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1326 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1327
1328 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1329 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1330
1331 rvu_npa_unregister_interrupts(rvu);
1332 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1333 kfree(rvu_dl->rvu_npa_health_reporter);
1334 }
1335
1336 static int rvu_health_reporters_create(struct rvu *rvu)
1337 {
1338 struct rvu_devlink *rvu_dl;
1339 int err;
1340
1341 rvu_dl = rvu->rvu_dl;
1342 err = rvu_npa_health_reporters_create(rvu_dl);
1343 if (err)
1344 return err;
1345
1346 return rvu_nix_health_reporters_create(rvu_dl);
1347 }
1348
1349 static void rvu_health_reporters_destroy(struct rvu *rvu)
1350 {
1351 struct rvu_devlink *rvu_dl;
1352
1353 if (!rvu->rvu_dl)
1354 return;
1355
1356 rvu_dl = rvu->rvu_dl;
1357 rvu_npa_health_reporters_destroy(rvu_dl);
1358 rvu_nix_health_reporters_destroy(rvu_dl);
1359 }
1360
1361 /* Devlink Params APIs */
1362 static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
1363 union devlink_param_value val,
1364 struct netlink_ext_ack *extack)
1365 {
1366 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1367 struct rvu *rvu = rvu_dl->rvu;
1368 int dwrr_mtu = val.vu32;
1369 struct nix_txsch *txsch;
1370 struct nix_hw *nix_hw;
1371
1372 if (!rvu->hw->cap.nix_common_dwrr_mtu) {
1373 NL_SET_ERR_MSG_MOD(extack,
1374 "Setting DWRR_MTU is not supported on this silicon");
1375 return -EOPNOTSUPP;
1376 }
1377
1378 if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
1379 (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
1380 NL_SET_ERR_MSG_MOD(extack,
1381 "Invalid, supported MTUs are 0, 2, 4, 8, 16, 32, 64 ... 4K, 8K, 32K, 64K and 9728, 10240");
1382 return -EINVAL;
1383 }
1384
1385 nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
1386 if (!nix_hw)
1387 return -ENODEV;
1388
1389 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1390 if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
1391 NL_SET_ERR_MSG_MOD(extack,
1392 "Changing DWRR MTU is not supported when there are active NIXLFs");
1393 NL_SET_ERR_MSG_MOD(extack,
1394 "Make sure none of the PF/VF interfaces are initialized and retry");
1395 return -EOPNOTSUPP;
1396 }
1397
1398 return 0;
1399 }
1400
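/* The dwrr_mtu devlink parameter is programmed into NIX_AF_DWRR_RPM_MTU in
 * the hardware encoding; convert_bytes_to_dwrr_mtu() and
 * convert_dwrr_mtu_to_bytes() translate between the byte value seen by user
 * space and the register format.
 */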
1401 static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
1402 struct devlink_param_gset_ctx *ctx)
1403 {
1404 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1405 struct rvu *rvu = rvu_dl->rvu;
1406 u64 dwrr_mtu;
1407
1408 dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
1409 rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
1410
1411 return 0;
1412 }
1413
1414 static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
1415 struct devlink_param_gset_ctx *ctx)
1416 {
1417 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1418 struct rvu *rvu = rvu_dl->rvu;
1419 u64 dwrr_mtu;
1420
1421 if (!rvu->hw->cap.nix_common_dwrr_mtu)
1422 return -EOPNOTSUPP;
1423
1424 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
1425 ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
1426
1427 return 0;
1428 }
1429
1430 enum rvu_af_dl_param_id {
1431 RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
1432 RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
1433 };
1434
1435 static const struct devlink_param rvu_af_dl_params[] = {
1436 DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
1437 "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
1438 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1439 rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
1440 rvu_af_dl_dwrr_mtu_validate),
1441 };
1442
1443 /* Devlink switch mode */
1444 static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1445 {
1446 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1447 struct rvu *rvu = rvu_dl->rvu;
1448 struct rvu_switch *rswitch;
1449
1450 rswitch = &rvu->rswitch;
1451 *mode = rswitch->mode;
1452
1453 return 0;
1454 }
1455
1456 static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1457 struct netlink_ext_ack *extack)
1458 {
1459 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1460 struct rvu *rvu = rvu_dl->rvu;
1461 struct rvu_switch *rswitch;
1462
1463 rswitch = &rvu->rswitch;
1464 switch (mode) {
1465 case DEVLINK_ESWITCH_MODE_LEGACY:
1466 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1467 if (rswitch->mode == mode)
1468 return 0;
1469 rswitch->mode = mode;
1470 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1471 rvu_switch_enable(rvu);
1472 else
1473 rvu_switch_disable(rvu);
1474 break;
1475 default:
1476 return -EINVAL;
1477 }
1478
1479 return 0;
1480 }
1481
1482 static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1483 struct netlink_ext_ack *extack)
1484 {
1485 return devlink_info_driver_name_put(req, DRV_NAME);
1486 }
1487
1488 static const struct devlink_ops rvu_devlink_ops = {
1489 .info_get = rvu_devlink_info_get,
1490 .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
1491 .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
1492 };
1493
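/* Top level devlink registration: allocate and register the devlink
 * instance, create the NPA and NIX health reporters and then expose the AF
 * parameters (currently only dwrr_mtu).
 */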
1494 int rvu_register_dl(struct rvu *rvu)
1495 {
1496 struct rvu_devlink *rvu_dl;
1497 struct devlink *dl;
1498 int err;
1499
1500 dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
1501 rvu->dev);
1502 if (!dl) {
1503 dev_warn(rvu->dev, "devlink_alloc failed\n");
1504 return -ENOMEM;
1505 }
1506
1507 err = devlink_register(dl);
1508 if (err) {
1509 dev_err(rvu->dev, "devlink register failed with error %d\n", err);
1510 devlink_free(dl);
1511 return err;
1512 }
1513
1514 rvu_dl = devlink_priv(dl);
1515 rvu_dl->dl = dl;
1516 rvu_dl->rvu = rvu;
1517 rvu->rvu_dl = rvu_dl;
1518
1519 err = rvu_health_reporters_create(rvu);
1520 if (err) {
1521 dev_err(rvu->dev,
1522 "devlink health reporter creation failed with error %d\n", err);
1523 goto err_dl_health;
1524 }
1525
1526 err = devlink_params_register(dl, rvu_af_dl_params,
1527 ARRAY_SIZE(rvu_af_dl_params));
1528 if (err) {
1529 dev_err(rvu->dev,
1530 "devlink params register failed with error %d", err);
1531 goto err_dl_health;
1532 }
1533
1534 devlink_params_publish(dl);
1535
1536 return 0;
1537
1538 err_dl_health:
1539 rvu_health_reporters_destroy(rvu);
1540 devlink_unregister(dl);
1541 devlink_free(dl);
1542 return err;
1543 }
1544
1545 void rvu_unregister_dl(struct rvu *rvu)
1546 {
1547 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1548 struct devlink *dl = rvu_dl->dl;
1549
1550 if (!dl)
1551 return;
1552
1553 devlink_params_unregister(dl, rvu_af_dl_params,
1554 ARRAY_SIZE(rvu_af_dl_params));
1555 rvu_health_reporters_destroy(rvu);
1556 devlink_unregister(dl);
1557 devlink_free(dl);
1558 }
1559