• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include<linux/bitfield.h>
9 
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14 
15 #define DRV_NAME "octeontx2-af"
16 
/* Open a "name: { ... }" section in @fmsg: a named pair nest followed by
 * an object nest. Must be balanced by rvu_report_pair_end().
 */
static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	int err = devlink_fmsg_pair_nest_start(fmsg, name);

	if (err)
		return err;

	return devlink_fmsg_obj_nest_start(fmsg);
}
27 
/* Close the object and pair nests opened by rvu_report_pair_start(). */
static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	int err = devlink_fmsg_obj_nest_end(fmsg);

	if (err)
		return err;

	return devlink_fmsg_pair_nest_end(fmsg);
}
38 
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)39 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
40 				   const char *name, irq_handler_t fn)
41 {
42 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
43 	int rc;
44 
45 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
46 	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
47 			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
48 	if (rc)
49 		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
50 	else
51 		rvu->irq_allocated[offset] = true;
52 
53 	return rvu->irq_allocated[offset];
54 }
55 
rvu_nix_intr_work(struct work_struct * work)56 static void rvu_nix_intr_work(struct work_struct *work)
57 {
58 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
59 
60 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
61 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
62 			      "NIX_AF_RVU Error",
63 			      rvu_nix_health_reporter->nix_event_ctx);
64 }
65 
rvu_nix_af_rvu_intr_handler(int irq,void * rvu_irq)66 static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
67 {
68 	struct rvu_nix_event_ctx *nix_event_context;
69 	struct rvu_devlink *rvu_dl = rvu_irq;
70 	struct rvu *rvu;
71 	int blkaddr;
72 	u64 intr;
73 
74 	rvu = rvu_dl->rvu;
75 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
76 	if (blkaddr < 0)
77 		return IRQ_NONE;
78 
79 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
80 	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
81 	nix_event_context->nix_af_rvu_int = intr;
82 
83 	/* Clear interrupts */
84 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
85 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
86 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
87 
88 	return IRQ_HANDLED;
89 }
90 
rvu_nix_gen_work(struct work_struct * work)91 static void rvu_nix_gen_work(struct work_struct *work)
92 {
93 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
94 
95 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
96 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
97 			      "NIX_AF_GEN Error",
98 			      rvu_nix_health_reporter->nix_event_ctx);
99 }
100 
rvu_nix_af_rvu_gen_handler(int irq,void * rvu_irq)101 static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
102 {
103 	struct rvu_nix_event_ctx *nix_event_context;
104 	struct rvu_devlink *rvu_dl = rvu_irq;
105 	struct rvu *rvu;
106 	int blkaddr;
107 	u64 intr;
108 
109 	rvu = rvu_dl->rvu;
110 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
111 	if (blkaddr < 0)
112 		return IRQ_NONE;
113 
114 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
115 	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
116 	nix_event_context->nix_af_rvu_gen = intr;
117 
118 	/* Clear interrupts */
119 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
120 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
121 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
122 
123 	return IRQ_HANDLED;
124 }
125 
rvu_nix_err_work(struct work_struct * work)126 static void rvu_nix_err_work(struct work_struct *work)
127 {
128 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
129 
130 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
131 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
132 			      "NIX_AF_ERR Error",
133 			      rvu_nix_health_reporter->nix_event_ctx);
134 }
135 
rvu_nix_af_rvu_err_handler(int irq,void * rvu_irq)136 static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
137 {
138 	struct rvu_nix_event_ctx *nix_event_context;
139 	struct rvu_devlink *rvu_dl = rvu_irq;
140 	struct rvu *rvu;
141 	int blkaddr;
142 	u64 intr;
143 
144 	rvu = rvu_dl->rvu;
145 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
146 	if (blkaddr < 0)
147 		return IRQ_NONE;
148 
149 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
150 	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
151 	nix_event_context->nix_af_rvu_err = intr;
152 
153 	/* Clear interrupts */
154 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
155 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
156 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
157 
158 	return IRQ_HANDLED;
159 }
160 
rvu_nix_ras_work(struct work_struct * work)161 static void rvu_nix_ras_work(struct work_struct *work)
162 {
163 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
164 
165 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
166 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
167 			      "NIX_AF_RAS Error",
168 			      rvu_nix_health_reporter->nix_event_ctx);
169 }
170 
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)171 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
172 {
173 	struct rvu_nix_event_ctx *nix_event_context;
174 	struct rvu_devlink *rvu_dl = rvu_irq;
175 	struct rvu *rvu;
176 	int blkaddr;
177 	u64 intr;
178 
179 	rvu = rvu_dl->rvu;
180 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
181 	if (blkaddr < 0)
182 		return IRQ_NONE;
183 
184 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
185 	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
186 	nix_event_context->nix_af_rvu_ras = intr;
187 
188 	/* Clear interrupts */
189 	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
190 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
191 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
192 
193 	return IRQ_HANDLED;
194 }
195 
/* Mask all NIX AF devlink interrupt sources and free every IRQ that
 * rvu_nix_register_interrupts() requested.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSIX vector offset of the NIX AF interrupts; 0 means the offset
	 * was never programmed, so nothing was registered and there is
	 * nothing to free.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	/* Mask all four sources before freeing their handlers */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	/* The RVU vector is handled on its own, outside the loop below */
	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* NOTE(review): this loop starts at NIX_AF_INT_VEC_AF_ERR; any
	 * vector numbered between VEC_RVU and VEC_AF_ERR (e.g. the GEN
	 * vector, depending on the enum layout in the headers) would not be
	 * freed here — confirm against the nix_af_int_vec enum.
	 */
	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
226 
rvu_nix_register_interrupts(struct rvu * rvu)227 static int rvu_nix_register_interrupts(struct rvu *rvu)
228 {
229 	int blkaddr, base;
230 	bool rc;
231 
232 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
233 	if (blkaddr < 0)
234 		return blkaddr;
235 
236 	/* Get NIX AF MSIX vectors offset. */
237 	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
238 	if (!base) {
239 		dev_warn(rvu->dev,
240 			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
241 			 blkaddr - BLKADDR_NIX0);
242 		return 0;
243 	}
244 	/* Register and enable NIX_AF_RVU_INT interrupt */
245 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_RVU,
246 				    "NIX_AF_RVU_INT",
247 				    rvu_nix_af_rvu_intr_handler);
248 	if (!rc)
249 		goto err;
250 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
251 
252 	/* Register and enable NIX_AF_GEN_INT interrupt */
253 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_GEN,
254 				    "NIX_AF_GEN_INT",
255 				    rvu_nix_af_rvu_gen_handler);
256 	if (!rc)
257 		goto err;
258 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
259 
260 	/* Register and enable NIX_AF_ERR_INT interrupt */
261 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
262 				    "NIX_AF_ERR_INT",
263 				    rvu_nix_af_rvu_err_handler);
264 	if (!rc)
265 		goto err;
266 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
267 
268 	/* Register and enable NIX_AF_RAS interrupt */
269 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
270 				    "NIX_AF_RAS",
271 				    rvu_nix_af_rvu_ras_handler);
272 	if (!rc)
273 		goto err;
274 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
275 
276 	return 0;
277 err:
278 	rvu_nix_unregister_interrupts(rvu);
279 	return rc;
280 }
281 
rvu_nix_report_show(struct devlink_fmsg * fmsg,void * ctx,enum nix_af_rvu_health health_reporter)282 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
283 			       enum nix_af_rvu_health health_reporter)
284 {
285 	struct rvu_nix_event_ctx *nix_event_context;
286 	u64 intr_val;
287 	int err;
288 
289 	nix_event_context = ctx;
290 	switch (health_reporter) {
291 	case NIX_AF_RVU_INTR:
292 		intr_val = nix_event_context->nix_af_rvu_int;
293 		err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
294 		if (err)
295 			return err;
296 		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
297 						nix_event_context->nix_af_rvu_int);
298 		if (err)
299 			return err;
300 		if (intr_val & BIT_ULL(0)) {
301 			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
302 			if (err)
303 				return err;
304 		}
305 		err = rvu_report_pair_end(fmsg);
306 		if (err)
307 			return err;
308 		break;
309 	case NIX_AF_RVU_GEN:
310 		intr_val = nix_event_context->nix_af_rvu_gen;
311 		err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
312 		if (err)
313 			return err;
314 		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
315 						nix_event_context->nix_af_rvu_gen);
316 		if (err)
317 			return err;
318 		if (intr_val & BIT_ULL(0)) {
319 			err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
320 			if (err)
321 				return err;
322 		}
323 		if (intr_val & BIT_ULL(1)) {
324 			err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
325 			if (err)
326 				return err;
327 		}
328 		if (intr_val & BIT_ULL(4)) {
329 			err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
330 			if (err)
331 				return err;
332 		}
333 		err = rvu_report_pair_end(fmsg);
334 		if (err)
335 			return err;
336 		break;
337 	case NIX_AF_RVU_ERR:
338 		intr_val = nix_event_context->nix_af_rvu_err;
339 		err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
340 		if (err)
341 			return err;
342 		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
343 						nix_event_context->nix_af_rvu_err);
344 		if (err)
345 			return err;
346 		if (intr_val & BIT_ULL(14)) {
347 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
348 			if (err)
349 				return err;
350 		}
351 		if (intr_val & BIT_ULL(13)) {
352 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
353 			if (err)
354 				return err;
355 		}
356 		if (intr_val & BIT_ULL(12)) {
357 			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
358 			if (err)
359 				return err;
360 		}
361 		if (intr_val & BIT_ULL(6)) {
362 			err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
363 			if (err)
364 				return err;
365 		}
366 		if (intr_val & BIT_ULL(5)) {
367 			err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
368 			if (err)
369 				return err;
370 		}
371 		if (intr_val & BIT_ULL(4)) {
372 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
373 			if (err)
374 				return err;
375 		}
376 		if (intr_val & BIT_ULL(3)) {
377 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
378 			if (err)
379 				return err;
380 		}
381 		if (intr_val & BIT_ULL(2)) {
382 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
383 			if (err)
384 				return err;
385 		}
386 		if (intr_val & BIT_ULL(1)) {
387 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
388 			if (err)
389 				return err;
390 		}
391 		if (intr_val & BIT_ULL(0)) {
392 			err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
393 			if (err)
394 				return err;
395 		}
396 		err = rvu_report_pair_end(fmsg);
397 		if (err)
398 			return err;
399 		break;
400 	case NIX_AF_RVU_RAS:
401 		intr_val = nix_event_context->nix_af_rvu_err;
402 		err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
403 		if (err)
404 			return err;
405 		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
406 						nix_event_context->nix_af_rvu_err);
407 		if (err)
408 			return err;
409 		err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
410 		if (err)
411 			return err;
412 		if (intr_val & BIT_ULL(34)) {
413 			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
414 			if (err)
415 				return err;
416 		}
417 		if (intr_val & BIT_ULL(33)) {
418 			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
419 			if (err)
420 				return err;
421 		}
422 		if (intr_val & BIT_ULL(32)) {
423 			err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
424 			if (err)
425 				return err;
426 		}
427 		if (intr_val & BIT_ULL(4)) {
428 			err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
429 			if (err)
430 				return err;
431 		}
432 		if (intr_val & BIT_ULL(3)) {
433 			err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
434 
435 			if (err)
436 				return err;
437 		}
438 		if (intr_val & BIT_ULL(2)) {
439 			err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
440 			if (err)
441 				return err;
442 		}
443 		if (intr_val & BIT_ULL(1)) {
444 			err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
445 			if (err)
446 				return err;
447 		}
448 		if (intr_val & BIT_ULL(0)) {
449 			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
450 			if (err)
451 				return err;
452 		}
453 		err = rvu_report_pair_end(fmsg);
454 		if (err)
455 			return err;
456 		break;
457 	default:
458 		return -EINVAL;
459 	}
460 
461 	return 0;
462 }
463 
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)464 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
465 				struct devlink_fmsg *fmsg, void *ctx,
466 				struct netlink_ext_ack *netlink_extack)
467 {
468 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
469 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
470 	struct rvu_nix_event_ctx *nix_ctx;
471 
472 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
473 
474 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
475 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
476 }
477 
rvu_hw_nix_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)478 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
479 				   void *ctx, struct netlink_ext_ack *netlink_extack)
480 {
481 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
482 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
483 	int blkaddr;
484 
485 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
486 	if (blkaddr < 0)
487 		return blkaddr;
488 
489 	if (nix_event_ctx->nix_af_rvu_int)
490 		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
491 
492 	return 0;
493 }
494 
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)495 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
496 			       struct devlink_fmsg *fmsg, void *ctx,
497 			       struct netlink_ext_ack *netlink_extack)
498 {
499 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
500 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
501 	struct rvu_nix_event_ctx *nix_ctx;
502 
503 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
504 
505 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
506 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
507 }
508 
rvu_hw_nix_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)509 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
510 				  void *ctx, struct netlink_ext_ack *netlink_extack)
511 {
512 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
513 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
514 	int blkaddr;
515 
516 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
517 	if (blkaddr < 0)
518 		return blkaddr;
519 
520 	if (nix_event_ctx->nix_af_rvu_gen)
521 		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
522 
523 	return 0;
524 }
525 
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)526 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
527 			       struct devlink_fmsg *fmsg, void *ctx,
528 			       struct netlink_ext_ack *netlink_extack)
529 {
530 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
531 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
532 	struct rvu_nix_event_ctx *nix_ctx;
533 
534 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
535 
536 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
537 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
538 }
539 
rvu_hw_nix_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)540 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
541 				  void *ctx, struct netlink_ext_ack *netlink_extack)
542 {
543 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
544 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
545 	int blkaddr;
546 
547 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
548 	if (blkaddr < 0)
549 		return blkaddr;
550 
551 	if (nix_event_ctx->nix_af_rvu_err)
552 		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
553 
554 	return 0;
555 }
556 
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)557 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
558 			       struct devlink_fmsg *fmsg, void *ctx,
559 			       struct netlink_ext_ack *netlink_extack)
560 {
561 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
562 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
563 	struct rvu_nix_event_ctx *nix_ctx;
564 
565 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
566 
567 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
568 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
569 }
570 
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)571 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
572 				  void *ctx, struct netlink_ext_ack *netlink_extack)
573 {
574 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
575 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
576 	int blkaddr;
577 
578 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
579 	if (blkaddr < 0)
580 		return blkaddr;
581 
582 	if (nix_event_ctx->nix_af_rvu_int)
583 		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
584 
585 	return 0;
586 }
587 
/* Instantiate the devlink health reporter ops structures
 * (rvu_<name>_reporter_ops) from the rvu_<name>_dump/rvu_<name>_recover
 * callbacks defined above.
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
594 
/* Allocate the NIX event context, create the four devlink health
 * reporters and the workqueue used to defer reports from IRQ context.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on mid-function failure the already-created state is left
 * reachable through rvu_dl->rvu_nix_health_reporter rather than freed
 * here — confirm callers run rvu_nix_health_reporters_destroy() on that
 * path, otherwise the event context/reporters leak.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish immediately so the destroy path can find partial state */
	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue the IRQ handlers use to defer devlink_health_report() */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
654 
rvu_nix_health_reporters_create(struct rvu_devlink * rvu_dl)655 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
656 {
657 	struct rvu *rvu = rvu_dl->rvu;
658 	int err;
659 
660 	err = rvu_nix_register_reporters(rvu_dl);
661 	if (err) {
662 		dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
663 			 err);
664 		return err;
665 	}
666 	rvu_nix_register_interrupts(rvu);
667 
668 	return 0;
669 }
670 
/* Tear down the NIX devlink health reporters, their interrupts and the
 * event context allocated by rvu_nix_register_reporters().
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	/* The ras reporter is created last; a NULL here means registration
	 * never completed. NOTE(review): in that partially-created case the
	 * earlier reporters and event ctx are not freed on this path —
	 * confirm the create/error flow covers it.
	 */
	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	/* Free IRQs before releasing the context the handlers write into */
	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
696 
rvu_npa_intr_work(struct work_struct * work)697 static void rvu_npa_intr_work(struct work_struct *work)
698 {
699 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
700 
701 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
702 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
703 			      "NPA_AF_RVU Error",
704 			      rvu_npa_health_reporter->npa_event_ctx);
705 }
706 
rvu_npa_af_rvu_intr_handler(int irq,void * rvu_irq)707 static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
708 {
709 	struct rvu_npa_event_ctx *npa_event_context;
710 	struct rvu_devlink *rvu_dl = rvu_irq;
711 	struct rvu *rvu;
712 	int blkaddr;
713 	u64 intr;
714 
715 	rvu = rvu_dl->rvu;
716 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
717 	if (blkaddr < 0)
718 		return IRQ_NONE;
719 
720 	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
721 	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
722 	npa_event_context->npa_af_rvu_int = intr;
723 
724 	/* Clear interrupts */
725 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
726 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
727 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
728 
729 	return IRQ_HANDLED;
730 }
731 
rvu_npa_gen_work(struct work_struct * work)732 static void rvu_npa_gen_work(struct work_struct *work)
733 {
734 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
735 
736 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
737 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
738 			      "NPA_AF_GEN Error",
739 			      rvu_npa_health_reporter->npa_event_ctx);
740 }
741 
rvu_npa_af_gen_intr_handler(int irq,void * rvu_irq)742 static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
743 {
744 	struct rvu_npa_event_ctx *npa_event_context;
745 	struct rvu_devlink *rvu_dl = rvu_irq;
746 	struct rvu *rvu;
747 	int blkaddr;
748 	u64 intr;
749 
750 	rvu = rvu_dl->rvu;
751 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
752 	if (blkaddr < 0)
753 		return IRQ_NONE;
754 
755 	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
756 	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
757 	npa_event_context->npa_af_rvu_gen = intr;
758 
759 	/* Clear interrupts */
760 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
761 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
762 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
763 
764 	return IRQ_HANDLED;
765 }
766 
rvu_npa_err_work(struct work_struct * work)767 static void rvu_npa_err_work(struct work_struct *work)
768 {
769 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
770 
771 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
772 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
773 			      "NPA_AF_ERR Error",
774 			      rvu_npa_health_reporter->npa_event_ctx);
775 }
776 
rvu_npa_af_err_intr_handler(int irq,void * rvu_irq)777 static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
778 {
779 	struct rvu_npa_event_ctx *npa_event_context;
780 	struct rvu_devlink *rvu_dl = rvu_irq;
781 	struct rvu *rvu;
782 	int blkaddr;
783 	u64 intr;
784 
785 	rvu = rvu_dl->rvu;
786 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
787 	if (blkaddr < 0)
788 		return IRQ_NONE;
789 	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
790 	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
791 	npa_event_context->npa_af_rvu_err = intr;
792 
793 	/* Clear interrupts */
794 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
795 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
796 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
797 
798 	return IRQ_HANDLED;
799 }
800 
rvu_npa_ras_work(struct work_struct * work)801 static void rvu_npa_ras_work(struct work_struct *work)
802 {
803 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
804 
805 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
806 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
807 			      "HW NPA_AF_RAS Error reported",
808 			      rvu_npa_health_reporter->npa_event_ctx);
809 }
810 
rvu_npa_af_ras_intr_handler(int irq,void * rvu_irq)811 static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
812 {
813 	struct rvu_npa_event_ctx *npa_event_context;
814 	struct rvu_devlink *rvu_dl = rvu_irq;
815 	struct rvu *rvu;
816 	int blkaddr;
817 	u64 intr;
818 
819 	rvu = rvu_dl->rvu;
820 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
821 	if (blkaddr < 0)
822 		return IRQ_NONE;
823 
824 	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
825 	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
826 	npa_event_context->npa_af_rvu_ras = intr;
827 
828 	/* Clear interrupts */
829 	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
830 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
831 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
832 
833 	return IRQ_HANDLED;
834 }
835 
rvu_npa_unregister_interrupts(struct rvu * rvu)836 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
837 {
838 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
839 	int i, offs, blkaddr;
840 	u64 reg;
841 
842 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
843 	if (blkaddr < 0)
844 		return;
845 
846 	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
847 	offs = reg & 0x3FF;
848 
849 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
850 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
851 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
852 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
853 
854 	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
855 		if (rvu->irq_allocated[offs + i]) {
856 			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
857 			rvu->irq_allocated[offs + i] = false;
858 		}
859 }
860 
rvu_npa_register_interrupts(struct rvu * rvu)861 static int rvu_npa_register_interrupts(struct rvu *rvu)
862 {
863 	int blkaddr, base;
864 	bool rc;
865 
866 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
867 	if (blkaddr < 0)
868 		return blkaddr;
869 
870 	/* Get NPA AF MSIX vectors offset. */
871 	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
872 	if (!base) {
873 		dev_warn(rvu->dev,
874 			 "Failed to get NPA_AF_INT vector offsets\n");
875 		return 0;
876 	}
877 
878 	/* Register and enable NPA_AF_RVU_INT interrupt */
879 	rc = rvu_common_request_irq(rvu, base +  NPA_AF_INT_VEC_RVU,
880 				    "NPA_AF_RVU_INT",
881 				    rvu_npa_af_rvu_intr_handler);
882 	if (!rc)
883 		goto err;
884 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
885 
886 	/* Register and enable NPA_AF_GEN_INT interrupt */
887 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
888 				    "NPA_AF_RVU_GEN",
889 				    rvu_npa_af_gen_intr_handler);
890 	if (!rc)
891 		goto err;
892 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
893 
894 	/* Register and enable NPA_AF_ERR_INT interrupt */
895 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
896 				    "NPA_AF_ERR_INT",
897 				    rvu_npa_af_err_intr_handler);
898 	if (!rc)
899 		goto err;
900 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
901 
902 	/* Register and enable NPA_AF_RAS interrupt */
903 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
904 				    "NPA_AF_RAS",
905 				    rvu_npa_af_ras_intr_handler);
906 	if (!rc)
907 		goto err;
908 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
909 
910 	return 0;
911 err:
912 	rvu_npa_unregister_interrupts(rvu);
913 	return rc;
914 }
915 
/* Format one NPA interrupt snapshot into a devlink fmsg dump.
 *
 * @fmsg: devlink formatted-message being built
 * @ctx: pointer to a struct rvu_npa_event_ctx holding the latched
 *       interrupt register values
 * @health_reporter: selects which register (GEN/ERR/RAS/RVU) to decode
 *
 * Emits the raw register value followed by one string per asserted bit.
 * Returns 0 on success or the first devlink fmsg error encountered.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;
	int err;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
						npa_event_context->npa_af_rvu_gen);
		if (err)
			return err;
		if (intr_val & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
			if (err)
				return err;
		}

		/* Bits 15:0 flag per-source "free operation disabled" */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
			if (err)
				return err;
		}

		/* Bits 31:16 flag per-source "alloc operation disabled" */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_ERR:
		err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
						npa_event_context->npa_af_rvu_err);
		if (err)
			return err;

		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_RAS:
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
						npa_event_context->npa_af_rvu_ras);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_INTR:
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
						npa_event_context->npa_af_rvu_int);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
			if (err)
				return err;
		}
		return rvu_report_pair_end(fmsg);
	default:
		return -EINVAL;
	}

	return 0;
}
1103 
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1104 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
1105 				struct devlink_fmsg *fmsg, void *ctx,
1106 				struct netlink_ext_ack *netlink_extack)
1107 {
1108 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1109 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1110 	struct rvu_npa_event_ctx *npa_ctx;
1111 
1112 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1113 
1114 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
1115 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
1116 }
1117 
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1118 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
1119 				   void *ctx, struct netlink_ext_ack *netlink_extack)
1120 {
1121 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1122 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1123 	int blkaddr;
1124 
1125 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1126 	if (blkaddr < 0)
1127 		return blkaddr;
1128 
1129 	if (npa_event_ctx->npa_af_rvu_int)
1130 		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
1131 
1132 	return 0;
1133 }
1134 
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1135 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
1136 			       struct devlink_fmsg *fmsg, void *ctx,
1137 			       struct netlink_ext_ack *netlink_extack)
1138 {
1139 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1140 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1141 	struct rvu_npa_event_ctx *npa_ctx;
1142 
1143 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1144 
1145 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
1146 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
1147 }
1148 
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1149 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
1150 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1151 {
1152 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1153 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1154 	int blkaddr;
1155 
1156 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1157 	if (blkaddr < 0)
1158 		return blkaddr;
1159 
1160 	if (npa_event_ctx->npa_af_rvu_gen)
1161 		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
1162 
1163 	return 0;
1164 }
1165 
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1166 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
1167 			       struct devlink_fmsg *fmsg, void *ctx,
1168 			       struct netlink_ext_ack *netlink_extack)
1169 {
1170 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1171 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1172 	struct rvu_npa_event_ctx *npa_ctx;
1173 
1174 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1175 
1176 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
1177 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
1178 }
1179 
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1180 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
1181 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1182 {
1183 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1184 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1185 	int blkaddr;
1186 
1187 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1188 	if (blkaddr < 0)
1189 		return blkaddr;
1190 
1191 	if (npa_event_ctx->npa_af_rvu_err)
1192 		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1193 
1194 	return 0;
1195 }
1196 
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1197 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1198 			       struct devlink_fmsg *fmsg, void *ctx,
1199 			       struct netlink_ext_ack *netlink_extack)
1200 {
1201 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1202 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1203 	struct rvu_npa_event_ctx *npa_ctx;
1204 
1205 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1206 
1207 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1208 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1209 }
1210 
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1211 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1212 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1213 {
1214 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1215 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1216 	int blkaddr;
1217 
1218 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1219 	if (blkaddr < 0)
1220 		return blkaddr;
1221 
1222 	if (npa_event_ctx->npa_af_rvu_ras)
1223 		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1224 
1225 	return 0;
1226 }
1227 
/* Instantiate the devlink health reporter ops for each NPA interrupt
 * class.  RVU_REPORTERS() (declared in rvu.h) presumably expands the
 * rvu_hw_<name>_dump/recover callbacks above into the
 * rvu_hw_<name>_reporter_ops structures referenced by
 * rvu_npa_register_reporters() — confirm against the macro definition.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);
1232 
1233 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1234 
/* Allocate the NPA health-reporter state, create the four devlink health
 * reporters (RVU/GEN/ERR/RAS), the shared workqueue and the deferred-work
 * items used by the interrupt handlers.
 *
 * On failure this function returns without undoing earlier steps; cleanup
 * of partially-created state is handled by the caller's error path
 * (rvu_npa_health_reporters_destroy() via rvu_register_dl()).
 *
 * Return: 0 on success, negative errno on allocation/creation failure.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the pointer before further setup so the destroy path can
	 * find whatever was created.
	 */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	/* Work items queued from the NPA interrupt handlers */
	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1294 
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1295 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1296 {
1297 	struct rvu *rvu = rvu_dl->rvu;
1298 	int err;
1299 
1300 	err = rvu_npa_register_reporters(rvu_dl);
1301 	if (err) {
1302 		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1303 			 err);
1304 		return err;
1305 	}
1306 	rvu_npa_register_interrupts(rvu);
1307 
1308 	return 0;
1309 }
1310 
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1311 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1312 {
1313 	struct rvu_npa_health_reporters *npa_reporters;
1314 	struct rvu *rvu = rvu_dl->rvu;
1315 
1316 	npa_reporters = rvu_dl->rvu_npa_health_reporter;
1317 
1318 	if (!npa_reporters->rvu_hw_npa_ras_reporter)
1319 		return;
1320 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1321 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1322 
1323 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1324 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1325 
1326 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1327 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1328 
1329 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1330 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1331 
1332 	rvu_npa_unregister_interrupts(rvu);
1333 	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1334 	kfree(rvu_dl->rvu_npa_health_reporter);
1335 }
1336 
rvu_health_reporters_create(struct rvu * rvu)1337 static int rvu_health_reporters_create(struct rvu *rvu)
1338 {
1339 	struct rvu_devlink *rvu_dl;
1340 	int err;
1341 
1342 	rvu_dl = rvu->rvu_dl;
1343 	err = rvu_npa_health_reporters_create(rvu_dl);
1344 	if (err)
1345 		return err;
1346 
1347 	return rvu_nix_health_reporters_create(rvu_dl);
1348 }
1349 
rvu_health_reporters_destroy(struct rvu * rvu)1350 static void rvu_health_reporters_destroy(struct rvu *rvu)
1351 {
1352 	struct rvu_devlink *rvu_dl;
1353 
1354 	if (!rvu->rvu_dl)
1355 		return;
1356 
1357 	rvu_dl = rvu->rvu_dl;
1358 	rvu_npa_health_reporters_destroy(rvu_dl);
1359 	rvu_nix_health_reporters_destroy(rvu_dl);
1360 }
1361 
1362 /* Devlink Params APIs */
/* Validate the "dwrr_mtu" devlink param: silicon must support a common
 * DWRR MTU, the value must be a power of two <= 64K (or the special jumbo
 * values 9728/10240), and no NIX LF may be active (all SMQs free).
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	/* Keep unsigned: val.vu32 > INT_MAX must not wrap negative and
	 * slip past the range/power-of-two checks below.
	 */
	u32 dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	/* NOTE(review): is_power_of_2(0) is false, so 0 is rejected even
	 * though the message lists it — confirm intended behavior.
	 */
	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* Any allocated SMQ means a NIX LF is initialized */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1401 
/* Program the validated DWRR MTU (in bytes) into the NIX0 register after
 * converting it to the hardware encoding.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
1415 
/* Read back the DWRR MTU from hardware and report it in bytes. */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg_val;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	reg_val = rvu_read64(rvu, BLKADDR_NIX0,
			     nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(reg_val);

	return 0;
}
1432 
/* Driver-specific devlink parameter IDs; numbered after the generic
 * devlink param ID space to avoid collisions.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
};
1439 
/* Report whether the NPC exact-match table feature is currently active. */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	const char *state;

	state = rvu_npc_exact_has_match_table(rvu) ? "enabled" : "disabled";
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", state);

	return 0;
}
1454 
/* devlink "set" handler: input was already vetted by the validate
 * callback, so simply disable the exact-match feature.
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1465 
/* Validate the exact-match disable request: only the literal value "1"
 * is accepted, and only while the feature can still be disabled.
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 req;

	if (kstrtoull(val.vstr, 10, &req)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (req != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1493 
/* Report the high-priority MCAM zone size as a percentage of the
 * unreserved entry bitmap.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1508 
/* Resize the MCAM priority zones: the requested percentage becomes the
 * high-priority zone and half of the remainder the low-priority zone.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 high_cnt, low_cnt;

	high_cnt = (mcam->bmap_entries * ctx->val.vu8) / 100;
	low_cnt = (mcam->bmap_entries - high_cnt) / 2;

	mcam->hprio_count = high_cnt;
	mcam->hprio_end = high_cnt;
	mcam->lprio_count = low_cnt;
	mcam->lprio_start = mcam->bmap_entries - low_cnt;

	return 0;
}
1526 
/* Validate an MCAM high-zone resize: percentage must be 12..100 and no
 * MCAM entry may already be handed out.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	/* The percent of high prio zone must range from 12% to 100% of
	 * unreserved mcam space.
	 */
	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* Resizing is only safe while the whole bitmap is still free */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1554 
/* Devlink runtime parameters registered unconditionally for the AF. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
1562 
/* Extra devlink parameters registered only when the NPC exact-match
 * table is present (see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
1577 
1578 /* Devlink switch mode */
/* Devlink switch mode */
/* Report the current eswitch mode (legacy or switchdev). */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	*mode = rvu_dl->rvu->rswitch.mode;

	return 0;
}
1590 
/* Switch between legacy and switchdev eswitch modes, enabling or
 * disabling the RVU switch accordingly.  No-op if the mode is unchanged.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		break;
	default:
		return -EINVAL;
	}

	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1616 
/* devlink operations exposed by the AF: only eswitch mode get/set. */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1621 
/* Allocate and register the devlink instance for the AF: health
 * reporters first, then the base parameters, then (CN10K-B only) the
 * exact-match parameters, and finally publish the devlink device.
 *
 * Return: 0 on success, negative errno on failure (all partially-created
 * state is torn down via the goto cleanup chain).
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link the devlink private area with the driver state */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	/* Base params were registered before the exact-match ones */
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1678 
rvu_unregister_dl(struct rvu * rvu)1679 void rvu_unregister_dl(struct rvu *rvu)
1680 {
1681 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1682 	struct devlink *dl = rvu_dl->dl;
1683 
1684 	devlink_unregister(dl);
1685 
1686 	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1687 
1688 	/* Unregister exact match devlink only for CN10K-B */
1689 	if (rvu_npc_exact_has_match_table(rvu))
1690 		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1691 					  ARRAY_SIZE(rvu_af_dl_param_exact_match));
1692 
1693 	rvu_health_reporters_destroy(rvu);
1694 	devlink_free(dl);
1695 }
1696