1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/pci.h>
16 #include <linux/sysfs.h>
17
18 #include "cgx.h"
19 #include "rvu.h"
20 #include "rvu_reg.h"
21 #include "ptp.h"
22
23 #include "rvu_trace.h"
24
25 #define DRV_NAME "octeontx2-af"
26 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
27
28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
29
30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 struct rvu_block *block, int lf);
32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33 struct rvu_block *block, int lf);
34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
35
36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
37 int type, int num,
38 void (mbox_handler)(struct work_struct *),
39 void (mbox_up_handler)(struct work_struct *));
40 enum {
41 TYPE_AFVF,
42 TYPE_AFPF,
43 };
44
45 /* Supported devices */
46 static const struct pci_device_id rvu_id_table[] = {
47 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
48 { 0, } /* end of table */
49 };
50
51 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
52 MODULE_DESCRIPTION(DRV_STRING);
53 MODULE_LICENSE("GPL v2");
54 MODULE_DEVICE_TABLE(pci, rvu_id_table);
55
56 static char *mkex_profile; /* MKEX profile name */
57 module_param(mkex_profile, charp, 0000);
58 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
59
60 static void rvu_setup_hw_capabilities(struct rvu *rvu)
61 {
62 struct rvu_hwinfo *hw = rvu->hw;
63
64 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
65 hw->cap.nix_fixed_txschq_mapping = false;
66 hw->cap.nix_shaping = true;
67 hw->cap.nix_tx_link_bp = true;
68 hw->cap.nix_rx_multicast = true;
69
70 if (is_rvu_96xx_B0(rvu)) {
71 hw->cap.nix_fixed_txschq_mapping = true;
72 hw->cap.nix_txsch_per_cgx_lmac = 4;
73 hw->cap.nix_txsch_per_lbk_lmac = 132;
74 hw->cap.nix_txsch_per_sdp_lmac = 76;
75 hw->cap.nix_shaping = false;
76 hw->cap.nix_tx_link_bp = false;
77 if (is_rvu_96xx_A0(rvu))
78 hw->cap.nix_rx_multicast = false;
79 }
80 }
81
82 /* Poll a RVU block's register 'offset' until the bits specified
83 * by 'mask' read as zero (if 'zero' is true) or nonzero otherwise.
84 */
85 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
86 {
87 unsigned long timeout = jiffies + usecs_to_jiffies(20000);
88 bool twice = false;
89 void __iomem *reg;
90 u64 reg_val;
91
92 reg = rvu->afreg_base + ((block << 28) | offset);
93 again:
94 reg_val = readq(reg);
95 if (zero && !(reg_val & mask))
96 return 0;
97 if (!zero && (reg_val & mask))
98 return 0;
99 if (time_before(jiffies, timeout)) {
100 usleep_range(1, 5);
101 goto again;
102 }
103 /* If the CPU was scheduled out before the 'time_before' check
104 * above and was scheduled back in only after jiffies crossed the
105 * timeout, check once more whether HW completed the operation
106 * in the meantime.
107 */
108 if (!twice) {
109 twice = true;
110 goto again;
111 }
112 return -EBUSY;
113 }
114
115 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
116 {
117 int id;
118
119 if (!rsrc->bmap)
120 return -EINVAL;
121
122 id = find_first_zero_bit(rsrc->bmap, rsrc->max);
123 if (id >= rsrc->max)
124 return -ENOSPC;
125
126 __set_bit(id, rsrc->bmap);
127
128 return id;
129 }
130
131 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
132 {
133 int start;
134
135 if (!rsrc->bmap)
136 return -EINVAL;
137
138 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
139 if (start >= rsrc->max)
140 return -ENOSPC;
141
142 bitmap_set(rsrc->bmap, start, nrsrc);
143 return start;
144 }
145
146 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
147 {
148 if (!rsrc->bmap)
149 return;
150 if (start >= rsrc->max)
151 return;
152
153 bitmap_clear(rsrc->bmap, start, nrsrc);
154 }
155
156 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
157 {
158 int start;
159
160 if (!rsrc->bmap)
161 return false;
162
163 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
164 if (start >= rsrc->max)
165 return false;
166
167 return true;
168 }
169
170 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
171 {
172 if (!rsrc->bmap)
173 return;
174
175 __clear_bit(id, rsrc->bmap);
176 }
177
178 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
179 {
180 int used;
181
182 if (!rsrc->bmap)
183 return 0;
184
185 used = bitmap_weight(rsrc->bmap, rsrc->max);
186 return (rsrc->max - used);
187 }
188
189 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
190 {
191 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
192 sizeof(long), GFP_KERNEL);
193 if (!rsrc->bmap)
194 return -ENOMEM;
195 return 0;
196 }
197
198 /* Get block LF's HW index from a PF_FUNC's block slot number */
199 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
200 {
201 u16 match = 0;
202 int lf;
203
204 mutex_lock(&rvu->rsrc_lock);
205 for (lf = 0; lf < block->lf.max; lf++) {
206 if (block->fn_map[lf] == pcifunc) {
207 if (slot == match) {
208 mutex_unlock(&rvu->rsrc_lock);
209 return lf;
210 }
211 match++;
212 }
213 }
214 mutex_unlock(&rvu->rsrc_lock);
215 return -ENODEV;
216 }
217
218 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
219 * Some silicon variants of OcteonTX2 support
220 * multiple blocks of the same type.
221 *
222 * @pcifunc has to be zero when no LF is yet attached.
223 */
224 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
225 {
226 int devnum, blkaddr = -ENODEV;
227 u64 cfg, reg;
228 bool is_pf;
229
230 switch (blktype) {
231 case BLKTYPE_NPC:
232 blkaddr = BLKADDR_NPC;
233 goto exit;
234 case BLKTYPE_NPA:
235 blkaddr = BLKADDR_NPA;
236 goto exit;
237 case BLKTYPE_NIX:
238 /* For now assume NIX0 */
239 if (!pcifunc) {
240 blkaddr = BLKADDR_NIX0;
241 goto exit;
242 }
243 break;
244 case BLKTYPE_SSO:
245 blkaddr = BLKADDR_SSO;
246 goto exit;
247 case BLKTYPE_SSOW:
248 blkaddr = BLKADDR_SSOW;
249 goto exit;
250 case BLKTYPE_TIM:
251 blkaddr = BLKADDR_TIM;
252 goto exit;
253 case BLKTYPE_CPT:
254 /* For now assume CPT0 */
255 if (!pcifunc) {
256 blkaddr = BLKADDR_CPT0;
257 goto exit;
258 }
259 break;
260 }
261
262 /* Check if this is a RVU PF or VF */
263 if (pcifunc & RVU_PFVF_FUNC_MASK) {
264 is_pf = false;
265 devnum = rvu_get_hwvf(rvu, pcifunc);
266 } else {
267 is_pf = true;
268 devnum = rvu_get_pf(pcifunc);
269 }
270
271 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
272 if (blktype == BLKTYPE_NIX) {
273 reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
274 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
275 if (cfg)
276 blkaddr = BLKADDR_NIX0;
277 }
278
279 /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
280 if (blktype == BLKTYPE_CPT) {
281 reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
282 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
283 if (cfg)
284 blkaddr = BLKADDR_CPT0;
285 }
286
287 exit:
288 if (is_block_implemented(rvu->hw, blkaddr))
289 return blkaddr;
290 return -ENODEV;
291 }
292
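/* Update the SW view of an LF attach/detach: record the owning pcifunc
 * in the block's fn_map, adjust the PF/VF's per-block LF count and
 * mirror that count into the block's RVU_PRIV_*_CFG register.
 */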
293 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
294 struct rvu_block *block, u16 pcifunc,
295 u16 lf, bool attach)
296 {
297 int devnum, num_lfs = 0;
298 bool is_pf;
299 u64 reg;
300
301 if (lf >= block->lf.max) {
302 dev_err(&rvu->pdev->dev,
303 "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
304 __func__, lf, block->name, block->lf.max);
305 return;
306 }
307
308 /* Check if this is for a RVU PF or VF */
309 if (pcifunc & RVU_PFVF_FUNC_MASK) {
310 is_pf = false;
311 devnum = rvu_get_hwvf(rvu, pcifunc);
312 } else {
313 is_pf = true;
314 devnum = rvu_get_pf(pcifunc);
315 }
316
317 block->fn_map[lf] = attach ? pcifunc : 0;
318
319 switch (block->addr) {
320 case BLKADDR_NPA:
321 pfvf->npalf = attach ? true : false;
322 num_lfs = pfvf->npalf;
323 break;
324 case BLKADDR_NIX0:
325 case BLKADDR_NIX1:
326 pfvf->nixlf = attach ? true : false;
327 num_lfs = pfvf->nixlf;
328 break;
329 case BLKADDR_SSO:
330 attach ? pfvf->sso++ : pfvf->sso--;
331 num_lfs = pfvf->sso;
332 break;
333 case BLKADDR_SSOW:
334 attach ? pfvf->ssow++ : pfvf->ssow--;
335 num_lfs = pfvf->ssow;
336 break;
337 case BLKADDR_TIM:
338 attach ? pfvf->timlfs++ : pfvf->timlfs--;
339 num_lfs = pfvf->timlfs;
340 break;
341 case BLKADDR_CPT0:
342 attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
343 num_lfs = pfvf->cptlfs;
344 break;
345 case BLKADDR_CPT1:
346 attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
347 num_lfs = pfvf->cpt1_lfs;
348 break;
349 }
350
351 reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
352 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
353 }
354
355 inline int rvu_get_pf(u16 pcifunc)
356 {
357 return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
358 }
359
360 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
361 {
362 u64 cfg;
363
364 /* Get numVFs attached to this PF and first HWVF */
365 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
366 *numvfs = (cfg >> 12) & 0xFF;
367 *hwvf = cfg & 0xFFF;
368 }
369
370 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
371 {
372 int pf, func;
373 u64 cfg;
374
375 pf = rvu_get_pf(pcifunc);
376 func = pcifunc & RVU_PFVF_FUNC_MASK;
377
378 /* Get first HWVF attached to this PF */
379 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
380
381 return ((cfg & 0xFFF) + func - 1);
382 }
383
384 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
385 {
386 /* Check if it is a PF or VF */
387 if (pcifunc & RVU_PFVF_FUNC_MASK)
388 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
389 else
390 return &rvu->pf[rvu_get_pf(pcifunc)];
391 }
392
393 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
394 {
395 int pf, vf, nvfs;
396 u64 cfg;
397
398 pf = rvu_get_pf(pcifunc);
399 if (pf >= rvu->hw->total_pfs)
400 return false;
401
402 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
403 return true;
404
405 /* Check if VF is within number of VFs attached to this PF */
406 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
407 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
408 nvfs = (cfg >> 12) & 0xFF;
409 if (vf >= nvfs)
410 return false;
411
412 return true;
413 }
414
415 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
416 {
417 struct rvu_block *block;
418
419 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
420 return false;
421
422 block = &hw->block[blkaddr];
423 return block->implemented;
424 }
425
426 static void rvu_check_block_implemented(struct rvu *rvu)
427 {
428 struct rvu_hwinfo *hw = rvu->hw;
429 struct rvu_block *block;
430 int blkid;
431 u64 cfg;
432
433 /* For each block check if 'implemented' bit is set */
434 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
435 block = &hw->block[blkid];
436 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
437 if (cfg & BIT_ULL(11))
438 block->implemented = true;
439 }
440 }
441
442 static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
443 {
444 rvu_write64(rvu, BLKADDR_RVUM,
445 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
446 RVU_BLK_RVUM_REVID);
447 }
448
449 static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
450 {
451 rvu_write64(rvu, BLKADDR_RVUM,
452 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
453 }
454
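/* Trigger a HW reset of a single LF via the block's LF_RST register
 * and wait for the 'exec' bit (bit 12) to clear.
 */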
455 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
456 {
457 int err;
458
459 if (!block->implemented)
460 return 0;
461
462 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
463 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
464 true);
465 return err;
466 }
467
468 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
469 {
470 struct rvu_block *block = &rvu->hw->block[blkaddr];
471
472 if (!block->implemented)
473 return;
474
475 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
476 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
477 }
478
479 static void rvu_reset_all_blocks(struct rvu *rvu)
480 {
481 /* Do a HW reset of all RVU blocks */
482 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
483 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
484 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
485 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
486 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
487 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
488 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
489 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
490 rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
491 }
492
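/* Scan a block's LF config registers for LFs that firmware has already
 * provisioned; mark them used, map them to their owner PF/VF and
 * assign their MSIX vector offsets.
 */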
493 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
494 {
495 struct rvu_pfvf *pfvf;
496 u64 cfg;
497 int lf;
498
499 for (lf = 0; lf < block->lf.max; lf++) {
500 cfg = rvu_read64(rvu, block->addr,
501 block->lfcfg_reg | (lf << block->lfshift));
502 if (!(cfg & BIT_ULL(63)))
503 continue;
504
505 /* Set this resource as being used */
506 __set_bit(lf, block->lf.bmap);
507
508 /* Get the pcifunc to which this LF is attached */
509 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
510 rvu_update_rsrc_map(rvu, pfvf, block,
511 (cfg >> 8) & 0xFFFF, lf, true);
512
513 /* Set start MSIX vector for this LF within this PF/VF */
514 rvu_set_msix_offset(rvu, pfvf, block, lf);
515 }
516 }
517
518 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
519 {
520 int min_vecs;
521
522 if (!vf)
523 goto check_pf;
524
525 if (!nvecs) {
526 dev_warn(rvu->dev,
527 "PF%d:VF%d is configured with zero msix vectors, %d\n",
528 pf, vf - 1, nvecs);
529 }
530 return;
531
532 check_pf:
533 if (pf == 0)
534 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
535 else
536 min_vecs = RVU_PF_INT_VEC_CNT;
537
538 if (!(nvecs < min_vecs))
539 return;
540 dev_warn(rvu->dev,
541 "PF%d is configured with too few vectors, %d, min is %d\n",
542 pf, nvecs, min_vecs);
543 }
544
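/* Read each enabled PF's and VF's MSIX vector count, allocate per-function
 * MSIX bitmaps and vector-to-LF mapping arrays, reserve the RVU_PF/VF_INT_VEC
 * vectors, and remap the MSIX table base address as an IOVA for the HW.
 */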
545 static int rvu_setup_msix_resources(struct rvu *rvu)
546 {
547 struct rvu_hwinfo *hw = rvu->hw;
548 int pf, vf, numvfs, hwvf, err;
549 int nvecs, offset, max_msix;
550 struct rvu_pfvf *pfvf;
551 u64 cfg, phy_addr;
552 dma_addr_t iova;
553
554 for (pf = 0; pf < hw->total_pfs; pf++) {
555 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
556 /* If PF is not enabled, nothing to do */
557 if (!((cfg >> 20) & 0x01))
558 continue;
559
560 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
561
562 pfvf = &rvu->pf[pf];
563 /* Get num of MSIX vectors attached to this PF */
564 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
565 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
566 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
567
568 /* Alloc msix bitmap for this PF */
569 err = rvu_alloc_bitmap(&pfvf->msix);
570 if (err)
571 return err;
572
573 /* Allocate memory for MSIX vector to RVU block LF mapping */
574 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
575 sizeof(u16), GFP_KERNEL);
576 if (!pfvf->msix_lfmap)
577 return -ENOMEM;
578
579 /* For PF0 (AF) firmware will set msix vector offsets for
580 * AF, block AF and PF0_INT vectors, so jump to VFs.
581 */
582 if (!pf)
583 goto setup_vfmsix;
584
585 /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
586 * These are allocated on driver init and never freed,
587 * so no need to set 'msix_lfmap' for these.
588 */
589 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
590 nvecs = (cfg >> 12) & 0xFF;
591 cfg &= ~0x7FFULL;
592 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
593 rvu_write64(rvu, BLKADDR_RVUM,
594 RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
595 setup_vfmsix:
596 /* Alloc msix bitmap for VFs */
597 for (vf = 0; vf < numvfs; vf++) {
598 pfvf = &rvu->hwvf[hwvf + vf];
599 /* Get num of MSIX vectors attached to this VF */
600 cfg = rvu_read64(rvu, BLKADDR_RVUM,
601 RVU_PRIV_PFX_MSIX_CFG(pf));
602 pfvf->msix.max = (cfg & 0xFFF) + 1;
603 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
604
605 /* Alloc msix bitmap for this VF */
606 err = rvu_alloc_bitmap(&pfvf->msix);
607 if (err)
608 return err;
609
610 pfvf->msix_lfmap =
611 devm_kcalloc(rvu->dev, pfvf->msix.max,
612 sizeof(u16), GFP_KERNEL);
613 if (!pfvf->msix_lfmap)
614 return -ENOMEM;
615
616 /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
617 * These are allocated on driver init and never freed,
618 * so no need to set 'msix_lfmap' for these.
619 */
620 cfg = rvu_read64(rvu, BLKADDR_RVUM,
621 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
622 nvecs = (cfg >> 12) & 0xFF;
623 cfg &= ~0x7FFULL;
624 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
625 rvu_write64(rvu, BLKADDR_RVUM,
626 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
627 cfg | offset);
628 }
629 }
630
631 /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
632 * create an IOMMU mapping for the physical address configured by
633 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
634 */
635 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
636 max_msix = cfg & 0xFFFFF;
637 if (rvu->fwdata && rvu->fwdata->msixtr_base)
638 phy_addr = rvu->fwdata->msixtr_base;
639 else
640 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
641
642 iova = dma_map_resource(rvu->dev, phy_addr,
643 max_msix * PCI_MSIX_ENTRY_SIZE,
644 DMA_BIDIRECTIONAL, 0);
645
646 if (dma_mapping_error(rvu->dev, iova))
647 return -ENOMEM;
648
649 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
650 rvu->msix_base_iova = iova;
651 rvu->msixtr_base_phy = phy_addr;
652
653 return 0;
654 }
655
656 static void rvu_reset_msix(struct rvu *rvu)
657 {
658 /* Restore msixtr base register */
659 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
660 rvu->msixtr_base_phy);
661 }
662
663 static void rvu_free_hw_resources(struct rvu *rvu)
664 {
665 struct rvu_hwinfo *hw = rvu->hw;
666 struct rvu_block *block;
667 struct rvu_pfvf *pfvf;
668 int id, max_msix;
669 u64 cfg;
670
671 rvu_npa_freemem(rvu);
672 rvu_npc_freemem(rvu);
673 rvu_nix_freemem(rvu);
674
675 /* Free block LF bitmaps */
676 for (id = 0; id < BLK_COUNT; id++) {
677 block = &hw->block[id];
678 kfree(block->lf.bmap);
679 }
680
681 /* Free MSIX bitmaps */
682 for (id = 0; id < hw->total_pfs; id++) {
683 pfvf = &rvu->pf[id];
684 kfree(pfvf->msix.bmap);
685 }
686
687 for (id = 0; id < hw->total_vfs; id++) {
688 pfvf = &rvu->hwvf[id];
689 kfree(pfvf->msix.bmap);
690 }
691
692 /* Unmap MSIX vector base IOVA mapping */
693 if (!rvu->msix_base_iova)
694 return;
695 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
696 max_msix = cfg & 0xFFFFF;
697 dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
698 max_msix * PCI_MSIX_ENTRY_SIZE,
699 DMA_BIDIRECTIONAL, 0);
700
701 rvu_reset_msix(rvu);
702 mutex_destroy(&rvu->rsrc_lock);
703 }
704
705 static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
706 {
707 struct rvu_hwinfo *hw = rvu->hw;
708 int pf, vf, numvfs, hwvf;
709 struct rvu_pfvf *pfvf;
710 u64 *mac;
711
712 for (pf = 0; pf < hw->total_pfs; pf++) {
713 if (!is_pf_cgxmapped(rvu, pf))
714 continue;
715 /* Assign MAC address to PF */
716 pfvf = &rvu->pf[pf];
717 if (rvu->fwdata && pf < PF_MACNUM_MAX) {
718 mac = &rvu->fwdata->pf_macs[pf];
719 if (*mac)
720 u64_to_ether_addr(*mac, pfvf->mac_addr);
721 else
722 eth_random_addr(pfvf->mac_addr);
723 } else {
724 eth_random_addr(pfvf->mac_addr);
725 }
726
727 /* Assign MAC address to VFs */
728 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
729 for (vf = 0; vf < numvfs; vf++, hwvf++) {
730 pfvf = &rvu->hwvf[hwvf];
731 if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
732 mac = &rvu->fwdata->vf_macs[hwvf];
733 if (*mac)
734 u64_to_ether_addr(*mac, pfvf->mac_addr);
735 else
736 eth_random_addr(pfvf->mac_addr);
737 } else {
738 eth_random_addr(pfvf->mac_addr);
739 }
740 }
741 }
742 }
743
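/* Map the firmware-provided 'fwdata' region (PF/VF MAC addresses,
 * MSIX table base, clock frequencies) and validate its layout.
 */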
744 static int rvu_fwdata_init(struct rvu *rvu)
745 {
746 u64 fwdbase;
747 int err;
748
749 /* Get firmware data base address */
750 err = cgx_get_fwdata_base(&fwdbase);
751 if (err)
752 goto fail;
753 rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
754 if (!rvu->fwdata)
755 goto fail;
756 if (!is_rvu_fwdata_valid(rvu)) {
757 dev_err(rvu->dev,
758 "Mismatch in 'fwdata' struct btw kernel and firmware\n");
759 iounmap(rvu->fwdata);
760 rvu->fwdata = NULL;
761 return -EINVAL;
762 }
763 return 0;
764 fail:
765 dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
766 return -EIO;
767 }
768
769 static void rvu_fwdata_exit(struct rvu *rvu)
770 {
771 if (rvu->fwdata)
772 iounmap(rvu->fwdata);
773 }
774
775 static int rvu_setup_hw_resources(struct rvu *rvu)
776 {
777 struct rvu_hwinfo *hw = rvu->hw;
778 struct rvu_block *block;
779 int blkid, err;
780 u64 cfg;
781
782 /* Get HW supported max RVU PF & VF count */
783 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
784 hw->total_pfs = (cfg >> 32) & 0xFF;
785 hw->total_vfs = (cfg >> 20) & 0xFFF;
786 hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
787
788 /* Init NPA LF's bitmap */
789 block = &hw->block[BLKADDR_NPA];
790 if (!block->implemented)
791 goto nix;
792 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
793 block->lf.max = (cfg >> 16) & 0xFFF;
794 block->addr = BLKADDR_NPA;
795 block->type = BLKTYPE_NPA;
796 block->lfshift = 8;
797 block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
798 block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
799 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
800 block->lfcfg_reg = NPA_PRIV_LFX_CFG;
801 block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
802 block->lfreset_reg = NPA_AF_LF_RST;
803 sprintf(block->name, "NPA");
804 err = rvu_alloc_bitmap(&block->lf);
805 if (err)
806 return err;
807
808 nix:
809 /* Init NIX LF's bitmap */
810 block = &hw->block[BLKADDR_NIX0];
811 if (!block->implemented)
812 goto sso;
813 cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
814 block->lf.max = cfg & 0xFFF;
815 block->addr = BLKADDR_NIX0;
816 block->type = BLKTYPE_NIX;
817 block->lfshift = 8;
818 block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
819 block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
820 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
821 block->lfcfg_reg = NIX_PRIV_LFX_CFG;
822 block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
823 block->lfreset_reg = NIX_AF_LF_RST;
824 sprintf(block->name, "NIX");
825 err = rvu_alloc_bitmap(&block->lf);
826 if (err)
827 return err;
828
829 sso:
830 /* Init SSO group's bitmap */
831 block = &hw->block[BLKADDR_SSO];
832 if (!block->implemented)
833 goto ssow;
834 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
835 block->lf.max = cfg & 0xFFFF;
836 block->addr = BLKADDR_SSO;
837 block->type = BLKTYPE_SSO;
838 block->multislot = true;
839 block->lfshift = 3;
840 block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
841 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
842 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
843 block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
844 block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
845 block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
846 sprintf(block->name, "SSO GROUP");
847 err = rvu_alloc_bitmap(&block->lf);
848 if (err)
849 return err;
850
851 ssow:
852 /* Init SSO workslot's bitmap */
853 block = &hw->block[BLKADDR_SSOW];
854 if (!block->implemented)
855 goto tim;
856 block->lf.max = (cfg >> 56) & 0xFF;
857 block->addr = BLKADDR_SSOW;
858 block->type = BLKTYPE_SSOW;
859 block->multislot = true;
860 block->lfshift = 3;
861 block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
862 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
863 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
864 block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
865 block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
866 block->lfreset_reg = SSOW_AF_LF_HWS_RST;
867 sprintf(block->name, "SSOWS");
868 err = rvu_alloc_bitmap(&block->lf);
869 if (err)
870 return err;
871
872 tim:
873 /* Init TIM LF's bitmap */
874 block = &hw->block[BLKADDR_TIM];
875 if (!block->implemented)
876 goto cpt;
877 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
878 block->lf.max = cfg & 0xFFFF;
879 block->addr = BLKADDR_TIM;
880 block->type = BLKTYPE_TIM;
881 block->multislot = true;
882 block->lfshift = 3;
883 block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
884 block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
885 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
886 block->lfcfg_reg = TIM_PRIV_LFX_CFG;
887 block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
888 block->lfreset_reg = TIM_AF_LF_RST;
889 sprintf(block->name, "TIM");
890 err = rvu_alloc_bitmap(&block->lf);
891 if (err)
892 return err;
893
894 cpt:
895 /* Init CPT LF's bitmap */
896 block = &hw->block[BLKADDR_CPT0];
897 if (!block->implemented)
898 goto init;
899 cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
900 block->lf.max = cfg & 0xFF;
901 block->addr = BLKADDR_CPT0;
902 block->type = BLKTYPE_CPT;
903 block->multislot = true;
904 block->lfshift = 3;
905 block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
906 block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
907 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
908 block->lfcfg_reg = CPT_PRIV_LFX_CFG;
909 block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
910 block->lfreset_reg = CPT_AF_LF_RST;
911 sprintf(block->name, "CPT");
912 err = rvu_alloc_bitmap(&block->lf);
913 if (err)
914 return err;
915
916 init:
917 /* Allocate memory for PFVF data */
918 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
919 sizeof(struct rvu_pfvf), GFP_KERNEL);
920 if (!rvu->pf)
921 return -ENOMEM;
922
923 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
924 sizeof(struct rvu_pfvf), GFP_KERNEL);
925 if (!rvu->hwvf)
926 return -ENOMEM;
927
928 mutex_init(&rvu->rsrc_lock);
929
930 rvu_fwdata_init(rvu);
931
932 err = rvu_setup_msix_resources(rvu);
933 if (err)
934 return err;
935
936 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
937 block = &hw->block[blkid];
938 if (!block->lf.bmap)
939 continue;
940
941 /* Allocate memory for block LF/slot to pcifunc mapping info */
942 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
943 sizeof(u16), GFP_KERNEL);
944 if (!block->fn_map) {
945 err = -ENOMEM;
946 goto msix_err;
947 }
948
949 /* Scan all blocks to check if low level firmware has
950 * already provisioned any of the resources to a PF/VF.
951 */
952 rvu_scan_block(rvu, block);
953 }
954
955 err = rvu_npc_init(rvu);
956 if (err)
957 goto npc_err;
958
959 err = rvu_cgx_init(rvu);
960 if (err)
961 goto cgx_err;
962
963 /* Assign MACs for CGX mapped functions */
964 rvu_setup_pfvf_macaddress(rvu);
965
966 err = rvu_npa_init(rvu);
967 if (err)
968 goto npa_err;
969
970 err = rvu_nix_init(rvu);
971 if (err)
972 goto nix_err;
973
974 return 0;
975
976 nix_err:
977 rvu_nix_freemem(rvu);
978 npa_err:
979 rvu_npa_freemem(rvu);
980 cgx_err:
981 rvu_cgx_exit(rvu);
982 npc_err:
983 rvu_npc_freemem(rvu);
984 rvu_fwdata_exit(rvu);
985 msix_err:
986 rvu_reset_msix(rvu);
987 return err;
988 }
989
990 /* NPA and NIX admin queue APIs */
991 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
992 {
993 if (!aq)
994 return;
995
996 qmem_free(rvu->dev, aq->inst);
997 qmem_free(rvu->dev, aq->res);
998 devm_kfree(rvu->dev, aq);
999 }
1000
1001 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1002 int qsize, int inst_size, int res_size)
1003 {
1004 struct admin_queue *aq;
1005 int err;
1006
1007 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1008 if (!*ad_queue)
1009 return -ENOMEM;
1010 aq = *ad_queue;
1011
1012 /* Alloc memory for instructions i.e AQ */
1013 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1014 if (err) {
1015 devm_kfree(rvu->dev, aq);
1016 return err;
1017 }
1018
1019 /* Alloc memory for results */
1020 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1021 if (err) {
1022 rvu_aq_free(rvu, aq);
1023 return err;
1024 }
1025
1026 spin_lock_init(&aq->lock);
1027 return 0;
1028 }
1029
1030 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1031 struct ready_msg_rsp *rsp)
1032 {
1033 if (rvu->fwdata) {
1034 rsp->rclk_freq = rvu->fwdata->rclk;
1035 rsp->sclk_freq = rvu->fwdata->sclk;
1036 }
1037 return 0;
1038 }
1039
1040 /* Get current count of a RVU block's LF/slots
1041 * provisioned to a given RVU func.
1042 */
1043 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1044 {
1045 switch (blkaddr) {
1046 case BLKADDR_NPA:
1047 return pfvf->npalf ? 1 : 0;
1048 case BLKADDR_NIX0:
1049 case BLKADDR_NIX1:
1050 return pfvf->nixlf ? 1 : 0;
1051 case BLKADDR_SSO:
1052 return pfvf->sso;
1053 case BLKADDR_SSOW:
1054 return pfvf->ssow;
1055 case BLKADDR_TIM:
1056 return pfvf->timlfs;
1057 case BLKADDR_CPT0:
1058 return pfvf->cptlfs;
1059 case BLKADDR_CPT1:
1060 return pfvf->cpt1_lfs;
1061 }
1062 return 0;
1063 }
1064
1065 /* Return true if LFs of block type are attached to pcifunc */
1066 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1067 {
1068 switch (blktype) {
1069 case BLKTYPE_NPA:
1070 return pfvf->npalf ? 1 : 0;
1071 case BLKTYPE_NIX:
1072 return pfvf->nixlf ? 1 : 0;
1073 case BLKTYPE_SSO:
1074 return !!pfvf->sso;
1075 case BLKTYPE_SSOW:
1076 return !!pfvf->ssow;
1077 case BLKTYPE_TIM:
1078 return !!pfvf->timlfs;
1079 case BLKTYPE_CPT:
1080 return pfvf->cptlfs || pfvf->cpt1_lfs;
1081 }
1082
1083 return false;
1084 }
1085
1086 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1087 {
1088 struct rvu_pfvf *pfvf;
1089
1090 if (!is_pf_func_valid(rvu, pcifunc))
1091 return false;
1092
1093 pfvf = rvu_get_pfvf(rvu, pcifunc);
1094
1095 /* Check if this PFFUNC has a LF of type blktype attached */
1096 if (!is_blktype_attached(pfvf, blktype))
1097 return false;
1098
1099 return true;
1100 }
1101
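/* Use the block's LF_CFG_DEBUG lookup register to translate a
 * (pcifunc, slot) pair into the LF's HW index; returns -1 if the
 * lookup reports no valid LF.
 */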
1102 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1103 int pcifunc, int slot)
1104 {
1105 u64 val;
1106
1107 val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1108 rvu_write64(rvu, block->addr, block->lookup_reg, val);
1109 /* Wait for the lookup to finish */
1110 /* TODO: put some timeout here */
1111 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1112 ;
1113
1114 val = rvu_read64(rvu, block->addr, block->lookup_reg);
1115
1116 /* Check LF valid bit */
1117 if (!(val & (1ULL << 12)))
1118 return -1;
1119
1120 return (val & 0xFFF);
1121 }
1122
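/* Detach all LFs of the given block type from 'pcifunc': disable each
 * LF, update the SW mapping, free the LF and clear its MSIX offsets.
 */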
1123 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1124 {
1125 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1126 struct rvu_hwinfo *hw = rvu->hw;
1127 struct rvu_block *block;
1128 int slot, lf, num_lfs;
1129 int blkaddr;
1130
1131 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1132 if (blkaddr < 0)
1133 return;
1134
1135 block = &hw->block[blkaddr];
1136
1137 num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1138 if (!num_lfs)
1139 return;
1140
1141 for (slot = 0; slot < num_lfs; slot++) {
1142 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1143 if (lf < 0) /* This should never happen */
1144 continue;
1145
1146 /* Disable the LF */
1147 rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1148 (lf << block->lfshift), 0x00ULL);
1149
1150 /* Update SW maintained mapping info as well */
1151 rvu_update_rsrc_map(rvu, pfvf, block,
1152 pcifunc, lf, false);
1153
1154 /* Free the resource */
1155 rvu_free_rsrc(&block->lf, lf);
1156
1157 /* Clear MSIX vector offset for this LF */
1158 rvu_clear_msix_offset(rvu, pfvf, block, lf);
1159 }
1160 }
1161
1162 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1163 u16 pcifunc)
1164 {
1165 struct rvu_hwinfo *hw = rvu->hw;
1166 bool detach_all = true;
1167 struct rvu_block *block;
1168 int blkid;
1169
1170 mutex_lock(&rvu->rsrc_lock);
1171
1172 /* Check for partial resource detach */
1173 if (detach && detach->partial)
1174 detach_all = false;
1175
1176 /* Check if any RVU block's LFs are attached to this func;
1177 * if so, detach them.
1178 */
1179 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1180 block = &hw->block[blkid];
1181 if (!block->lf.bmap)
1182 continue;
1183 if (!detach_all && detach) {
1184 if (blkid == BLKADDR_NPA && !detach->npalf)
1185 continue;
1186 else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1187 continue;
1188 else if ((blkid == BLKADDR_SSO) && !detach->sso)
1189 continue;
1190 else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1191 continue;
1192 else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1193 continue;
1194 else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1195 continue;
1196 }
1197 rvu_detach_block(rvu, pcifunc, block->type);
1198 }
1199
1200 mutex_unlock(&rvu->rsrc_lock);
1201 return 0;
1202 }
1203
1204 int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1205 struct rsrc_detach *detach,
1206 struct msg_rsp *rsp)
1207 {
1208 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1209 }
1210
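/* Pick which NIX block (NIX0 or NIX1) a pcifunc should be attached to
 * and cache the choice in the PF/VF's nix_blkaddr.
 */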
1211 static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1212 {
1213 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1214 int blkaddr = BLKADDR_NIX0, vf;
1215 struct rvu_pfvf *pf;
1216
1217 /* All CGX mapped PFs are set with assigned NIX block during init */
1218 if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1219 pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1220 blkaddr = pf->nix_blkaddr;
1221 } else if (is_afvf(pcifunc)) {
1222 vf = pcifunc - 1;
1223 /* Assign NIX based on VF number. All even numbered VFs get
1224 * NIX0 and odd numbered ones get NIX1
1225 */
1226 blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1227 /* NIX1 is not present on all silicons */
1228 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1229 blkaddr = BLKADDR_NIX0;
1230 }
1231
1232 switch (blkaddr) {
1233 case BLKADDR_NIX1:
1234 pfvf->nix_blkaddr = BLKADDR_NIX1;
1235 break;
1236 case BLKADDR_NIX0:
1237 default:
1238 pfvf->nix_blkaddr = BLKADDR_NIX0;
1239 break;
1240 }
1241
1242 return pfvf->nix_blkaddr;
1243 }
1244
1245 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
1246 {
1247 int blkaddr;
1248
1249 switch (blktype) {
1250 case BLKTYPE_NIX:
1251 blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1252 break;
1253 default:
1254 return rvu_get_blkaddr(rvu, blktype, 0);
1255 }
1256
1257 if (is_block_implemented(rvu->hw, blkaddr))
1258 return blkaddr;
1259
1260 return -ENODEV;
1261 }
1262
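/* Attach 'num_lfs' LFs of the given block type to 'pcifunc': allocate
 * each LF, enable it with the owner and slot in its config register,
 * update the SW mapping and set its MSIX vector offset.
 */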
1263 static void rvu_attach_block(struct rvu *rvu, int pcifunc,
1264 int blktype, int num_lfs)
1265 {
1266 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1267 struct rvu_hwinfo *hw = rvu->hw;
1268 struct rvu_block *block;
1269 int slot, lf;
1270 int blkaddr;
1271 u64 cfg;
1272
1273 if (!num_lfs)
1274 return;
1275
1276 blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc);
1277 if (blkaddr < 0)
1278 return;
1279
1280 block = &hw->block[blkaddr];
1281 if (!block->lf.bmap)
1282 return;
1283
1284 for (slot = 0; slot < num_lfs; slot++) {
1285 /* Allocate the resource */
1286 lf = rvu_alloc_rsrc(&block->lf);
1287 if (lf < 0)
1288 return;
1289
1290 cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1291 rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1292 (lf << block->lfshift), cfg);
1293 rvu_update_rsrc_map(rvu, pfvf, block,
1294 pcifunc, lf, true);
1295
1296 /* Set start MSIX vector for this LF within this PF/VF */
1297 rvu_set_msix_offset(rvu, pfvf, block, lf);
1298 }
1299 }
1300
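/* Validate an attach request: reject duplicate NPA/NIX LF requests and
 * requests that exceed a block's maximum or the free LFs available.
 */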
1301 static int rvu_check_rsrc_availability(struct rvu *rvu,
1302 struct rsrc_attach *req, u16 pcifunc)
1303 {
1304 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1305 int free_lfs, mappedlfs, blkaddr;
1306 struct rvu_hwinfo *hw = rvu->hw;
1307 struct rvu_block *block;
1308
1309 /* Only one NPA LF can be attached */
1310 if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1311 block = &hw->block[BLKADDR_NPA];
1312 free_lfs = rvu_rsrc_free_count(&block->lf);
1313 if (!free_lfs)
1314 goto fail;
1315 } else if (req->npalf) {
1316 dev_err(&rvu->pdev->dev,
1317 "Func 0x%x: Invalid req, already has NPA\n",
1318 pcifunc);
1319 return -EINVAL;
1320 }
1321
1322 /* Only one NIX LF can be attached */
1323 if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1324 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1325 if (blkaddr < 0)
1326 return blkaddr;
1327 block = &hw->block[blkaddr];
1328 free_lfs = rvu_rsrc_free_count(&block->lf);
1329 if (!free_lfs)
1330 goto fail;
1331 } else if (req->nixlf) {
1332 dev_err(&rvu->pdev->dev,
1333 "Func 0x%x: Invalid req, already has NIX\n",
1334 pcifunc);
1335 return -EINVAL;
1336 }
1337
1338 if (req->sso) {
1339 block = &hw->block[BLKADDR_SSO];
1340 /* Is request within limits ? */
1341 if (req->sso > block->lf.max) {
1342 dev_err(&rvu->pdev->dev,
1343 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1344 pcifunc, req->sso, block->lf.max);
1345 return -EINVAL;
1346 }
1347 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1348 free_lfs = rvu_rsrc_free_count(&block->lf);
1349 /* Check if additional resources are available */
1350 if (req->sso > mappedlfs &&
1351 ((req->sso - mappedlfs) > free_lfs))
1352 goto fail;
1353 }
1354
1355 if (req->ssow) {
1356 block = &hw->block[BLKADDR_SSOW];
1357 if (req->ssow > block->lf.max) {
1358 dev_err(&rvu->pdev->dev,
1359 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1360 pcifunc, req->sso, block->lf.max);
1361 return -EINVAL;
1362 }
1363 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1364 free_lfs = rvu_rsrc_free_count(&block->lf);
1365 if (req->ssow > mappedlfs &&
1366 ((req->ssow - mappedlfs) > free_lfs))
1367 goto fail;
1368 }
1369
1370 if (req->timlfs) {
1371 block = &hw->block[BLKADDR_TIM];
1372 if (req->timlfs > block->lf.max) {
1373 dev_err(&rvu->pdev->dev,
1374 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1375 pcifunc, req->timlfs, block->lf.max);
1376 return -EINVAL;
1377 }
1378 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1379 free_lfs = rvu_rsrc_free_count(&block->lf);
1380 if (req->timlfs > mappedlfs &&
1381 ((req->timlfs - mappedlfs) > free_lfs))
1382 goto fail;
1383 }
1384
1385 if (req->cptlfs) {
1386 block = &hw->block[BLKADDR_CPT0];
1387 if (req->cptlfs > block->lf.max) {
1388 dev_err(&rvu->pdev->dev,
1389 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1390 pcifunc, req->cptlfs, block->lf.max);
1391 return -EINVAL;
1392 }
1393 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1394 free_lfs = rvu_rsrc_free_count(&block->lf);
1395 if (req->cptlfs > mappedlfs &&
1396 ((req->cptlfs - mappedlfs) > free_lfs))
1397 goto fail;
1398 }
1399
1400 return 0;
1401
1402 fail:
1403 dev_info(rvu->dev, "Request for %s failed\n", block->name);
1404 return -ENOSPC;
1405 }
1406
1407 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1408 struct rsrc_attach *attach,
1409 struct msg_rsp *rsp)
1410 {
1411 u16 pcifunc = attach->hdr.pcifunc;
1412 int err;
1413
1414 /* If first request, detach all existing attached resources */
1415 if (!attach->modify)
1416 rvu_detach_rsrcs(rvu, NULL, pcifunc);
1417
1418 mutex_lock(&rvu->rsrc_lock);
1419
1420 /* Check if the request can be accommodated */
1421 err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1422 if (err)
1423 goto exit;
1424
1425 /* Now attach the requested resources */
1426 if (attach->npalf)
1427 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
1428
1429 if (attach->nixlf)
1430 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
1431
1432 if (attach->sso) {
1433 /* RVU func doesn't know which exact LF or slot is attached
1434 * to it; it always sees them as slots 0, 1, 2... So for a 'modify'
1435 * request, simply detach all existing attached LFs/slots
1436 * and attach fresh ones.
1437 */
1438 if (attach->modify)
1439 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1440 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
1441 }
1442
1443 if (attach->ssow) {
1444 if (attach->modify)
1445 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1446 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
1447 }
1448
1449 if (attach->timlfs) {
1450 if (attach->modify)
1451 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1452 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
1453 }
1454
1455 if (attach->cptlfs) {
1456 if (attach->modify)
1457 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1458 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
1459 }
1460
1461 exit:
1462 mutex_unlock(&rvu->rsrc_lock);
1463 return err;
1464 }
1465
1466 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1467 int blkaddr, int lf)
1468 {
1469 u16 vec;
1470
1471 if (lf < 0)
1472 return MSIX_VECTOR_INVALID;
1473
1474 for (vec = 0; vec < pfvf->msix.max; vec++) {
1475 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1476 return vec;
1477 }
1478 return MSIX_VECTOR_INVALID;
1479 }
1480
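/* Allocate a contiguous range of MSIX vectors for this LF, program the
 * offset into the LF's MSIX config register and record the mapping.
 */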
1481 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1482 struct rvu_block *block, int lf)
1483 {
1484 u16 nvecs, vec, offset;
1485 u64 cfg;
1486
1487 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1488 (lf << block->lfshift));
1489 nvecs = (cfg >> 12) & 0xFF;
1490
1491 /* Check and alloc MSIX vectors, must be contiguous */
1492 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1493 return;
1494
1495 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1496
1497 /* Config MSIX offset in LF */
1498 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1499 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1500
1501 /* Update the bitmap as well */
1502 for (vec = 0; vec < nvecs; vec++)
1503 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1504 }
1505
1506 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1507 struct rvu_block *block, int lf)
1508 {
1509 u16 nvecs, vec, offset;
1510 u64 cfg;
1511
1512 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1513 (lf << block->lfshift));
1514 nvecs = (cfg >> 12) & 0xFF;
1515
1516 /* Clear MSIX offset in LF */
1517 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1518 (lf << block->lfshift), cfg & ~0x7FFULL);
1519
1520 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1521
1522 /* Update the mapping */
1523 for (vec = 0; vec < nvecs; vec++)
1524 pfvf->msix_lfmap[offset + vec] = 0;
1525
1526 /* Free the same in MSIX bitmap */
1527 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1528 }
1529
1530 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1531 struct msix_offset_rsp *rsp)
1532 {
1533 struct rvu_hwinfo *hw = rvu->hw;
1534 u16 pcifunc = req->hdr.pcifunc;
1535 struct rvu_pfvf *pfvf;
1536 int lf, slot;
1537
1538 pfvf = rvu_get_pfvf(rvu, pcifunc);
1539 if (!pfvf->msix.bmap)
1540 return 0;
1541
1542 /* Set MSIX offsets for each block's LFs attached to this PF/VF */
1543 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1544 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1545
1546 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
1547 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
1548
1549 rsp->sso = pfvf->sso;
1550 for (slot = 0; slot < rsp->sso; slot++) {
1551 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1552 rsp->sso_msixoff[slot] =
1553 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1554 }
1555
1556 rsp->ssow = pfvf->ssow;
1557 for (slot = 0; slot < rsp->ssow; slot++) {
1558 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1559 rsp->ssow_msixoff[slot] =
1560 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1561 }
1562
1563 rsp->timlfs = pfvf->timlfs;
1564 for (slot = 0; slot < rsp->timlfs; slot++) {
1565 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1566 rsp->timlf_msixoff[slot] =
1567 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1568 }
1569
1570 rsp->cptlfs = pfvf->cptlfs;
1571 for (slot = 0; slot < rsp->cptlfs; slot++) {
1572 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1573 rsp->cptlf_msixoff[slot] =
1574 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1575 }
1576 return 0;
1577 }
1578
1579 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1580 struct msg_rsp *rsp)
1581 {
1582 u16 pcifunc = req->hdr.pcifunc;
1583 u16 vf, numvfs;
1584 u64 cfg;
1585
1586 vf = pcifunc & RVU_PFVF_FUNC_MASK;
1587 cfg = rvu_read64(rvu, BLKADDR_RVUM,
1588 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1589 numvfs = (cfg >> 12) & 0xFF;
1590
1591 if (vf && vf <= numvfs)
1592 __rvu_flr_handler(rvu, pcifunc);
1593 else
1594 return RVU_INVALID_VF_ID;
1595
1596 return 0;
1597 }
1598
1599 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1600 struct get_hw_cap_rsp *rsp)
1601 {
1602 struct rvu_hwinfo *hw = rvu->hw;
1603
1604 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1605 rsp->nix_shaping = hw->cap.nix_shaping;
1606
1607 return 0;
1608 }
1609
1610 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
1611 struct mbox_msghdr *req)
1612 {
1613 struct rvu *rvu = pci_get_drvdata(mbox->pdev);
1614
1615 /* Check if valid, if not reply with an invalid msg */
1616 if (req->sig != OTX2_MBOX_REQ_SIG)
1617 goto bad_message;
1618
1619 switch (req->id) {
1620 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
1621 case _id: { \
1622 struct _rsp_type *rsp; \
1623 int err; \
1624 \
1625 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
1626 mbox, devid, \
1627 sizeof(struct _rsp_type)); \
1628 /* some handlers should complete even if reply */ \
1629 /* could not be allocated */ \
1630 if (!rsp && \
1631 _id != MBOX_MSG_DETACH_RESOURCES && \
1632 _id != MBOX_MSG_NIX_TXSCH_FREE && \
1633 _id != MBOX_MSG_VF_FLR) \
1634 return -ENOMEM; \
1635 if (rsp) { \
1636 rsp->hdr.id = _id; \
1637 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
1638 rsp->hdr.pcifunc = req->pcifunc; \
1639 rsp->hdr.rc = 0; \
1640 } \
1641 \
1642 err = rvu_mbox_handler_ ## _fn_name(rvu, \
1643 (struct _req_type *)req, \
1644 rsp); \
1645 if (rsp && err) \
1646 rsp->hdr.rc = err; \
1647 \
1648 trace_otx2_msg_process(mbox->pdev, _id, err); \
1649 return rsp ? err : -ENOMEM; \
1650 }
1651 MBOX_MESSAGES
1652 #undef M
1653
1654 bad_message:
1655 default:
1656 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
1657 return -ENODEV;
1658 }
1659 }
1660
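/* Process all mbox requests queued for a PF/VF: fix up the sender's
 * pcifunc, dispatch each message to its handler and send the responses.
 */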
1661 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
1662 {
1663 struct rvu *rvu = mwork->rvu;
1664 int offset, err, id, devid;
1665 struct otx2_mbox_dev *mdev;
1666 struct mbox_hdr *req_hdr;
1667 struct mbox_msghdr *msg;
1668 struct mbox_wq_info *mw;
1669 struct otx2_mbox *mbox;
1670
1671 switch (type) {
1672 case TYPE_AFPF:
1673 mw = &rvu->afpf_wq_info;
1674 break;
1675 case TYPE_AFVF:
1676 mw = &rvu->afvf_wq_info;
1677 break;
1678 default:
1679 return;
1680 }
1681
1682 devid = mwork - mw->mbox_wrk;
1683 mbox = &mw->mbox;
1684 mdev = &mbox->dev[devid];
1685
1686 /* Process received mbox messages */
1687 req_hdr = mdev->mbase + mbox->rx_start;
1688 if (mw->mbox_wrk[devid].num_msgs == 0)
1689 return;
1690
1691 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
1692
1693 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
1694 msg = mdev->mbase + offset;
1695
1696 /* Set which PF/VF sent this message based on mbox IRQ */
1697 switch (type) {
1698 case TYPE_AFPF:
1699 msg->pcifunc &=
1700 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
1701 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
1702 break;
1703 case TYPE_AFVF:
1704 msg->pcifunc &=
1705 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
1706 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
1707 break;
1708 }
1709
1710 err = rvu_process_mbox_msg(mbox, devid, msg);
1711 if (!err) {
1712 offset = mbox->rx_start + msg->next_msgoff;
1713 continue;
1714 }
1715
1716 if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
1717 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
1718 err, otx2_mbox_id2name(msg->id),
1719 msg->id, rvu_get_pf(msg->pcifunc),
1720 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1721 else
1722 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
1723 err, otx2_mbox_id2name(msg->id),
1724 msg->id, devid);
1725 }
1726 mw->mbox_wrk[devid].num_msgs = 0;
1727
1728 /* Send mbox responses to VF/PF */
1729 otx2_mbox_msg_send(mbox, devid);
1730 }
1731
1732 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
1733 {
1734 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1735
1736 __rvu_mbox_handler(mwork, TYPE_AFPF);
1737 }
1738
1739 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
1740 {
1741 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1742
1743 __rvu_mbox_handler(mwork, TYPE_AFVF);
1744 }
1745
1746 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
1747 {
1748 struct rvu *rvu = mwork->rvu;
1749 struct otx2_mbox_dev *mdev;
1750 struct mbox_hdr *rsp_hdr;
1751 struct mbox_msghdr *msg;
1752 struct mbox_wq_info *mw;
1753 struct otx2_mbox *mbox;
1754 int offset, id, devid;
1755
1756 switch (type) {
1757 case TYPE_AFPF:
1758 mw = &rvu->afpf_wq_info;
1759 break;
1760 case TYPE_AFVF:
1761 mw = &rvu->afvf_wq_info;
1762 break;
1763 default:
1764 return;
1765 }
1766
1767 devid = mwork - mw->mbox_wrk_up;
1768 mbox = &mw->mbox_up;
1769 mdev = &mbox->dev[devid];
1770
1771 rsp_hdr = mdev->mbase + mbox->rx_start;
1772 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
1773 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
1774 return;
1775 }
1776
1777 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
1778
1779 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
1780 msg = mdev->mbase + offset;
1781
1782 if (msg->id >= MBOX_MSG_MAX) {
1783 dev_err(rvu->dev,
1784 "Mbox msg with unknown ID 0x%x\n", msg->id);
1785 goto end;
1786 }
1787
1788 if (msg->sig != OTX2_MBOX_RSP_SIG) {
1789 dev_err(rvu->dev,
1790 "Mbox msg with wrong signature %x, ID 0x%x\n",
1791 msg->sig, msg->id);
1792 goto end;
1793 }
1794
1795 switch (msg->id) {
1796 case MBOX_MSG_CGX_LINK_EVENT:
1797 break;
1798 default:
1799 if (msg->rc)
1800 dev_err(rvu->dev,
1801 "Mbox msg response has err %d, ID 0x%x\n",
1802 msg->rc, msg->id);
1803 break;
1804 }
1805 end:
1806 offset = mbox->rx_start + msg->next_msgoff;
1807 mdev->msgs_acked++;
1808 }
1809 mw->mbox_wrk_up[devid].up_num_msgs = 0;
1810
1811 otx2_mbox_reset(mbox, devid);
1812 }
1813
1814 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
1815 {
1816 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1817
1818 __rvu_mbox_up_handler(mwork, TYPE_AFPF);
1819 }
1820
1821 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
1822 {
1823 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1824
1825 __rvu_mbox_up_handler(mwork, TYPE_AFVF);
1826 }
1827
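/* Set up AF<->PF or AF<->VF mailboxes: map the shared mailbox memory,
 * initialize the regular and 'up' mbox regions and create per-device
 * work items on a dedicated workqueue.
 */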
1828 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
1829 int type, int num,
1830 void (mbox_handler)(struct work_struct *),
1831 void (mbox_up_handler)(struct work_struct *))
1832 {
1833 void __iomem *hwbase = NULL, *reg_base;
1834 int err, i, dir, dir_up;
1835 struct rvu_work *mwork;
1836 const char *name;
1837 u64 bar4_addr;
1838
1839 switch (type) {
1840 case TYPE_AFPF:
1841 name = "rvu_afpf_mailbox";
1842 bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
1843 dir = MBOX_DIR_AFPF;
1844 dir_up = MBOX_DIR_AFPF_UP;
1845 reg_base = rvu->afreg_base;
1846 break;
1847 case TYPE_AFVF:
1848 name = "rvu_afvf_mailbox";
1849 bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
1850 dir = MBOX_DIR_PFVF;
1851 dir_up = MBOX_DIR_PFVF_UP;
1852 reg_base = rvu->pfreg_base;
1853 break;
1854 default:
1855 return -EINVAL;
1856 }
1857
1858 mw->mbox_wq = alloc_workqueue(name,
1859 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
1860 num);
1861 if (!mw->mbox_wq)
1862 return -ENOMEM;
1863
1864 mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
1865 sizeof(struct rvu_work), GFP_KERNEL);
1866 if (!mw->mbox_wrk) {
1867 err = -ENOMEM;
1868 goto exit;
1869 }
1870
1871 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
1872 sizeof(struct rvu_work), GFP_KERNEL);
1873 if (!mw->mbox_wrk_up) {
1874 err = -ENOMEM;
1875 goto exit;
1876 }
1877
1878 /* Mailbox is a reserved memory region (in RAM) shared between
1879 * RVU devices; it shouldn't be mapped as device memory, so that
1880 * unaligned accesses are allowed.
1881 */
1882 hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
1883 if (!hwbase) {
1884 dev_err(rvu->dev, "Unable to map mailbox region\n");
1885 err = -ENOMEM;
1886 goto exit;
1887 }
1888
1889 err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
1890 if (err)
1891 goto exit;
1892
1893 err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
1894 reg_base, dir_up, num);
1895 if (err)
1896 goto exit;
1897
1898 for (i = 0; i < num; i++) {
1899 mwork = &mw->mbox_wrk[i];
1900 mwork->rvu = rvu;
1901 INIT_WORK(&mwork->work, mbox_handler);
1902
1903 mwork = &mw->mbox_wrk_up[i];
1904 mwork->rvu = rvu;
1905 INIT_WORK(&mwork->work, mbox_up_handler);
1906 }
1907
1908 return 0;
1909 exit:
1910 if (hwbase)
1911 iounmap((void __iomem *)hwbase);
1912 destroy_workqueue(mw->mbox_wq);
1913 return err;
1914 }
1915
1916 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
1917 {
1918 if (mw->mbox_wq) {
1919 flush_workqueue(mw->mbox_wq);
1920 destroy_workqueue(mw->mbox_wq);
1921 mw->mbox_wq = NULL;
1922 }
1923
1924 if (mw->mbox.hwbase)
1925 iounmap((void __iomem *)mw->mbox.hwbase);
1926
1927 otx2_mbox_destroy(&mw->mbox);
1928 otx2_mbox_destroy(&mw->mbox_up);
1929 }
1930
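/* Walk the interrupt bitmap for devices [first, mdevs): for every device
 * with pending mailbox messages, latch hdr->num_msgs into its work item
 * and queue it so the actual processing runs in the mbox work handlers.
 */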
1931 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
1932 int mdevs, u64 intr)
1933 {
1934 struct otx2_mbox_dev *mdev;
1935 struct otx2_mbox *mbox;
1936 struct mbox_hdr *hdr;
1937 int i;
1938
1939 for (i = first; i < mdevs; i++) {
1940 /* Interrupt bit positions always start from 0, hence 'i - first' below */
1941 if (!(intr & BIT_ULL(i - first)))
1942 continue;
1943
1944 mbox = &mw->mbox;
1945 mdev = &mbox->dev[i];
1946 hdr = mdev->mbase + mbox->rx_start;
1947
1948 /* The hdr->num_msgs is set to zero immediately in the interrupt
1949 * handler to ensure that it holds a correct value next time
1950 * when the interrupt handler is called.
1951 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
1952 * pf->mbox.up_num_msgs holds the data for use in
1953 * pfaf_mbox_up_handler.
1954 */
1955
1956 if (hdr->num_msgs) {
1957 mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
1958 hdr->num_msgs = 0;
1959 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
1960 }
1961 mbox = &mw->mbox_up;
1962 mdev = &mbox->dev[i];
1963 hdr = mdev->mbase + mbox->rx_start;
1964 if (hdr->num_msgs) {
1965 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
1966 hdr->num_msgs = 0;
1967 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
1968 }
1969 }
1970 }
1971
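/* Mailbox interrupt top half: acknowledge the PF->AF and VF->AF interrupt
 * bits (one bit per PF/VF) and defer message processing to the mailbox
 * workqueue via rvu_queue_work(). VFs beyond 64 are reported through a
 * second interrupt register, hence the INTX(1) handling.
 */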
1972 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
1973 {
1974 struct rvu *rvu = (struct rvu *)rvu_irq;
1975 int vfs = rvu->vfs;
1976 u64 intr;
1977
1978 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
1979 /* Clear interrupts */
1980 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
1981 if (intr)
1982 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
1983
1984 /* Sync with mbox memory region */
1985 rmb();
1986
1987 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
1988
1989 /* Handle VF interrupts */
1990 if (vfs > 64) {
1991 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
1992 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
1993
1994 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
1995 vfs -= 64;
1996 }
1997
1998 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
1999 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2000 if (intr)
2001 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2002
2003 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2004
2005 return IRQ_HANDLED;
2006 }
2007
2008 static void rvu_enable_mbox_intr(struct rvu *rvu)
2009 {
2010 struct rvu_hwinfo *hw = rvu->hw;
2011
2012 /* Clear spurious irqs, if any */
2013 rvu_write64(rvu, BLKADDR_RVUM,
2014 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2015
2016 /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2017 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2018 INTR_MASK(hw->total_pfs) & ~1ULL);
2019 }
2020
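/* Release every LF that 'pcifunc' owns in the given block: run the
 * block-specific teardown for NIX/NPA LFs and then reset each LF so it
 * comes back clean for the next attach.
 */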
2021 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2022 {
2023 struct rvu_block *block;
2024 int slot, lf, num_lfs;
2025 int err;
2026
2027 block = &rvu->hw->block[blkaddr];
2028 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2029 block->addr);
2030 if (!num_lfs)
2031 return;
2032 for (slot = 0; slot < num_lfs; slot++) {
2033 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2034 if (lf < 0)
2035 continue;
2036
2037 /* Cleanup LF and reset it */
2038 if (block->addr == BLKADDR_NIX0)
2039 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2040 else if (block->addr == BLKADDR_NPA)
2041 rvu_npa_lf_teardown(rvu, pcifunc, lf);
2042
2043 err = rvu_lf_reset(rvu, block, lf);
2044 if (err) {
2045 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2046 block->addr, lf);
2047 }
2048 }
2049 }
2050
2051 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2052 {
2053 mutex_lock(&rvu->flr_lock);
2054 /* Reset order should reflect inter-block dependencies:
2055 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2056 * 2. Flush and reset SSO/SSOW
2057 * 3. Cleanup pools (NPA)
2058 */
2059 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2060 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2061 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2062 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2063 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2064 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2065 rvu_detach_rsrcs(rvu, NULL, pcifunc);
2066 mutex_unlock(&rvu->flr_lock);
2067 }
2068
2069 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2070 {
2071 int reg = 0;
2072
2073 /* pcifunc = 0(PF0) | (vf + 1) */
2074 __rvu_flr_handler(rvu, vf + 1);
2075
2076 if (vf >= 64) {
2077 reg = 1;
2078 vf = vf - 64;
2079 }
2080
2081 /* Signal FLR finish and enable IRQ */
2082 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2083 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2084 }
2085
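/* FLR work handler. Work items are indexed PFs first, then AF's VFs, so an
 * index >= total_pfs denotes an AF-VF FLR. For a PF FLR all of its VFs are
 * torn down before the PF itself; pcifunc is (pf << RVU_PFVF_PF_SHIFT) for
 * the PF and is OR'ed with (vf + 1) for each of its VFs.
 */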
2086 static void rvu_flr_handler(struct work_struct *work)
2087 {
2088 struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2089 struct rvu *rvu = flrwork->rvu;
2090 u16 pcifunc, numvfs, vf;
2091 u64 cfg;
2092 int pf;
2093
2094 pf = flrwork - rvu->flr_wrk;
2095 if (pf >= rvu->hw->total_pfs) {
2096 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2097 return;
2098 }
2099
2100 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2101 numvfs = (cfg >> 12) & 0xFF;
2102 pcifunc = pf << RVU_PFVF_PF_SHIFT;
2103
2104 for (vf = 0; vf < numvfs; vf++)
2105 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2106
2107 __rvu_flr_handler(rvu, pcifunc);
2108
2109 /* Signal FLR finish */
2110 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2111
2112 /* Enable interrupt */
2113 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
2114 }
2115
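/* Scan the AF-VF FLR interrupt register (VFs 0-63 in register 0, VFs 64+
 * in register 1), queue FLR work for every pending VF and mask its
 * interrupt until rvu_afvf_flr_handler() re-enables it after teardown.
 */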
2116 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2117 {
2118 int dev, vf, reg = 0;
2119 u64 intr;
2120
2121 if (start_vf >= 64)
2122 reg = 1;
2123
2124 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2125 if (!intr)
2126 return;
2127
2128 for (vf = 0; vf < numvfs; vf++) {
2129 if (!(intr & BIT_ULL(vf)))
2130 continue;
2131 dev = vf + start_vf + rvu->hw->total_pfs;
2132 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2133 /* Clear and disable the interrupt */
2134 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2135 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2136 }
2137 }
2138
2139 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2140 {
2141 struct rvu *rvu = (struct rvu *)rvu_irq;
2142 u64 intr;
2143 u8 pf;
2144
2145 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2146 if (!intr)
2147 goto afvf_flr;
2148
2149 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2150 if (intr & (1ULL << pf)) {
2151 /* PF is already dead, do only AF related operations */
2152 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2153 /* clear interrupt */
2154 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2155 BIT_ULL(pf));
2156 /* Disable the interrupt */
2157 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2158 BIT_ULL(pf));
2159 }
2160 }
2161
2162 afvf_flr:
2163 rvu_afvf_queue_flr_work(rvu, 0, 64);
2164 if (rvu->vfs > 64)
2165 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2166
2167 return IRQ_HANDLED;
2168 }
2169
2170 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2171 {
2172 int vf;
2173
2174 /* Nothing to be done here other than clearing the
2175 * TRPEND bit.
2176 */
2177 for (vf = 0; vf < 64; vf++) {
2178 if (intr & (1ULL << vf)) {
2179 /* clear the trpend due to ME(master enable) */
2180 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2181 /* clear interrupt */
2182 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2183 }
2184 }
2185 }
2186
2187 /* Handles ME interrupts from VFs of AF */
2188 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2189 {
2190 struct rvu *rvu = (struct rvu *)rvu_irq;
2191 int vfset;
2192 u64 intr;
2193
2194 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2195
2196 for (vfset = 0; vfset <= 1; vfset++) {
2197 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2198 if (intr)
2199 rvu_me_handle_vfset(rvu, vfset, intr);
2200 }
2201
2202 return IRQ_HANDLED;
2203 }
2204
2205 /* Handles ME interrupts from PFs */
2206 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2207 {
2208 struct rvu *rvu = (struct rvu *)rvu_irq;
2209 u64 intr;
2210 u8 pf;
2211
2212 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2213
2214 /* Nothing to be done here other than clearing the
2215 * TRPEND bit.
2216 */
2217 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2218 if (intr & (1ULL << pf)) {
2219 /* clear the trpend due to ME(master enable) */
2220 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2221 BIT_ULL(pf));
2222 /* clear interrupt */
2223 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2224 BIT_ULL(pf));
2225 }
2226 }
2227
2228 return IRQ_HANDLED;
2229 }
2230
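/* Mask the PF mailbox, FLR and ME interrupts at the AF level, free every
 * IRQ requested in rvu_register_interrupts() and release the MSI-X vectors.
 */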
2231 static void rvu_unregister_interrupts(struct rvu *rvu)
2232 {
2233 int irq;
2234
2235 /* Disable the Mbox interrupt */
2236 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2237 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2238
2239 /* Disable the PF FLR interrupt */
2240 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2241 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2242
2243 /* Disable the PF ME interrupt */
2244 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2245 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2246
2247 for (irq = 0; irq < rvu->num_vec; irq++) {
2248 if (rvu->irq_allocated[irq]) {
2249 free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2250 rvu->irq_allocated[irq] = false;
2251 }
2252 }
2253
2254 pci_free_irq_vectors(rvu->pdev);
2255 rvu->num_vec = 0;
2256 }
2257
2258 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2259 {
2260 struct rvu_pfvf *pfvf = &rvu->pf[0];
2261 int offset;
2262
2264 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2265
2266 /* Make sure there are enough MSIX vectors configured so that
2267 * VF interrupts can be handled. An offset of zero means the PF
2268 * vectors are not configured and would overlap the AF vectors.
2269 */
2270 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2271 offset;
2272 }
2273
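/* Allocate all of the AF's MSI-X vectors and request its interrupt
 * handlers: the AF-level mailbox, PF FLR and PF ME vectors first, then -
 * if enough vectors are provisioned - the PF-style vectors used for AF's
 * own VFs (VF mailbox, FLR and ME, two vectors each covering VFs 0-63 and
 * 64+). IRQ names are stored at &rvu->irq_name[vector * NAME_SIZE].
 */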
2274 static int rvu_register_interrupts(struct rvu *rvu)
2275 {
2276 int ret, offset, pf_vec_start;
2277
2278 rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2279
2280 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2281 NAME_SIZE, GFP_KERNEL);
2282 if (!rvu->irq_name)
2283 return -ENOMEM;
2284
2285 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2286 sizeof(bool), GFP_KERNEL);
2287 if (!rvu->irq_allocated)
2288 return -ENOMEM;
2289
2290 /* Enable MSI-X */
2291 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2292 rvu->num_vec, PCI_IRQ_MSIX);
2293 if (ret < 0) {
2294 dev_err(rvu->dev,
2295 "RVUAF: Request for %d msix vectors failed, ret %d\n",
2296 rvu->num_vec, ret);
2297 return ret;
2298 }
2299
2300 /* Register mailbox interrupt handler */
2301 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2302 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2303 rvu_mbox_intr_handler, 0,
2304 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2305 if (ret) {
2306 dev_err(rvu->dev,
2307 "RVUAF: IRQ registration failed for mbox irq\n");
2308 goto fail;
2309 }
2310
2311 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2312
2313 /* Enable mailbox interrupts from all PFs */
2314 rvu_enable_mbox_intr(rvu);
2315
2316 /* Register FLR interrupt handler */
2317 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2318 "RVUAF FLR");
2319 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2320 rvu_flr_intr_handler, 0,
2321 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2322 rvu);
2323 if (ret) {
2324 dev_err(rvu->dev,
2325 "RVUAF: IRQ registration failed for FLR\n");
2326 goto fail;
2327 }
2328 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2329
2330 /* Clear any pending FLR interrupts and enable FLR interrupt for all PFs */
2331 rvu_write64(rvu, BLKADDR_RVUM,
2332 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2333
2334 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2335 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2336
2337 /* Register ME interrupt handler */
2338 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2339 "RVUAF ME");
2340 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2341 rvu_me_pf_intr_handler, 0,
2342 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2343 rvu);
2344 if (ret) {
2345 dev_err(rvu->dev,
2346 "RVUAF: IRQ registration failed for ME\n");
2347 }
2348 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2349
2350 /* Clear TRPEND bit for all PFs */
2351 rvu_write64(rvu, BLKADDR_RVUM,
2352 RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2353 /* Clear any pending ME interrupts and enable ME interrupt for all PFs */
2354 rvu_write64(rvu, BLKADDR_RVUM,
2355 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2356
2357 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2358 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2359
2360 if (!rvu_afvf_msix_vectors_num_ok(rvu))
2361 return 0;
2362
2363 /* Get PF MSIX vectors offset. */
2364 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2365 RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2366
2367 /* Register MBOX0 interrupt. */
2368 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2369 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2370 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2371 rvu_mbox_intr_handler, 0,
2372 &rvu->irq_name[offset * NAME_SIZE],
2373 rvu);
2374 if (ret)
2375 dev_err(rvu->dev,
2376 "RVUAF: IRQ registration failed for Mbox0\n");
2377
2378 rvu->irq_allocated[offset] = true;
2379
2380 /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2381 * simply increment current offset by 1.
2382 */
2383 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2384 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2385 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2386 rvu_mbox_intr_handler, 0,
2387 &rvu->irq_name[offset * NAME_SIZE],
2388 rvu);
2389 if (ret)
2390 dev_err(rvu->dev,
2391 "RVUAF: IRQ registration failed for Mbox1\n");
2392
2393 rvu->irq_allocated[offset] = true;
2394
2395 /* Register FLR interrupt handler for AF's VFs */
2396 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2397 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2398 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2399 rvu_flr_intr_handler, 0,
2400 &rvu->irq_name[offset * NAME_SIZE], rvu);
2401 if (ret) {
2402 dev_err(rvu->dev,
2403 "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2404 goto fail;
2405 }
2406 rvu->irq_allocated[offset] = true;
2407
2408 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2409 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2410 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2411 rvu_flr_intr_handler, 0,
2412 &rvu->irq_name[offset * NAME_SIZE], rvu);
2413 if (ret) {
2414 dev_err(rvu->dev,
2415 "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2416 goto fail;
2417 }
2418 rvu->irq_allocated[offset] = true;
2419
2420 /* Register ME interrupt handler for AF's VFs */
2421 offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2422 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2423 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2424 rvu_me_vf_intr_handler, 0,
2425 &rvu->irq_name[offset * NAME_SIZE], rvu);
2426 if (ret) {
2427 dev_err(rvu->dev,
2428 "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2429 goto fail;
2430 }
2431 rvu->irq_allocated[offset] = true;
2432
2433 offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2434 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2435 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2436 rvu_me_vf_intr_handler, 0,
2437 &rvu->irq_name[offset * NAME_SIZE], rvu);
2438 if (ret) {
2439 dev_err(rvu->dev,
2440 "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2441 goto fail;
2442 }
2443 rvu->irq_allocated[offset] = true;
2444 return 0;
2445
2446 fail:
2447 rvu_unregister_interrupts(rvu);
2448 return ret;
2449 }
2450
2451 static void rvu_flr_wq_destroy(struct rvu *rvu)
2452 {
2453 if (rvu->flr_wq) {
2454 flush_workqueue(rvu->flr_wq);
2455 destroy_workqueue(rvu->flr_wq);
2456 rvu->flr_wq = NULL;
2457 }
2458 }
2459
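/* Set the FLR enable bit (bit 22) in every PF's privileged config and
 * prepare a dedicated workqueue plus one work item per PF and per possible
 * AF VF so FLRs are processed outside interrupt context.
 */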
2460 static int rvu_flr_init(struct rvu *rvu)
2461 {
2462 int dev, num_devs;
2463 u64 cfg;
2464 int pf;
2465
2466 /* Enable FLR for all PFs */
2467 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2468 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2469 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2470 cfg | BIT_ULL(22));
2471 }
2472
2473 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2474 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2475 1);
2476 if (!rvu->flr_wq)
2477 return -ENOMEM;
2478
2479 num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2480 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2481 sizeof(struct rvu_work), GFP_KERNEL);
2482 if (!rvu->flr_wrk) {
2483 destroy_workqueue(rvu->flr_wq);
2484 return -ENOMEM;
2485 }
2486
2487 for (dev = 0; dev < num_devs; dev++) {
2488 rvu->flr_wrk[dev].rvu = rvu;
2489 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2490 }
2491
2492 mutex_init(&rvu->flr_lock);
2493
2494 return 0;
2495 }
2496
2497 static void rvu_disable_afvf_intr(struct rvu *rvu)
2498 {
2499 int vfs = rvu->vfs;
2500
2501 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2502 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2503 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2504 if (vfs <= 64)
2505 return;
2506
2507 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2508 INTR_MASK(vfs - 64));
2509 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2510 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2511 }
2512
2513 static void rvu_enable_afvf_intr(struct rvu *rvu)
2514 {
2515 int vfs = rvu->vfs;
2516
2517 /* Clear any pending interrupts and enable AF VF interrupts for
2518 * the first 64 VFs.
2519 */
2520 /* Mbox */
2521 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2522 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2523
2524 /* FLR */
2525 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2526 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2527 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2528
2529 /* Same for remaining VFs, if any. */
2530 if (vfs <= 64)
2531 return;
2532
2533 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2534 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2535 INTR_MASK(vfs - 64));
2536
2537 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2538 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2539 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2540 }
2541
2542 #define PCI_DEVID_OCTEONTX2_LBK 0xA061
2543
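/* Read the number of available loopback (LBK) channels from the first LBK
 * device's LBK(0)_CONST register; rvu_enable_sriov() limits the number of
 * AF VFs to this channel count.
 */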
2544 static int lbk_get_num_chans(void)
2545 {
2546 struct pci_dev *pdev;
2547 void __iomem *base;
2548 int ret = -EIO;
2549
2550 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
2551 NULL);
2552 if (!pdev)
2553 goto err;
2554
2555 base = pci_ioremap_bar(pdev, 0);
2556 if (!base)
2557 goto err_put;
2558
2559 /* Read number of available LBK channels from LBK(0)_CONST register. */
2560 ret = (readq(base + 0x10) >> 32) & 0xffff;
2561 iounmap(base);
2562 err_put:
2563 pci_dev_put(pdev);
2564 err:
2565 return ret;
2566 }
2567
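/* Enable SR-IOV on the AF itself: bail out early if the AF-VF MSI-X
 * vectors are unusable, cap the VF count at the number of LBK channels,
 * bring up the AF<->VF mailbox and interrupts, and only then let the PCI
 * core create the VFs so no early VF interrupt is lost.
 */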
2568 static int rvu_enable_sriov(struct rvu *rvu)
2569 {
2570 struct pci_dev *pdev = rvu->pdev;
2571 int err, chans, vfs;
2572
2573 if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
2574 dev_warn(&pdev->dev,
2575 "Skipping SRIOV enablement since not enough IRQs are available\n");
2576 return 0;
2577 }
2578
2579 chans = lbk_get_num_chans();
2580 if (chans < 0)
2581 return chans;
2582
2583 vfs = pci_sriov_get_totalvfs(pdev);
2584
2585 /* Limit VFs in case we have more VFs than LBK channels available. */
2586 if (vfs > chans)
2587 vfs = chans;
2588
2589 if (!vfs)
2590 return 0;
2591
2592 /* Save the VF count for reference in the VF interrupt handlers.
2593 * Since interrupts might start arriving during SRIOV enablement,
2594 * the ordinary API cannot be used to get the number of enabled VFs.
2595 */
2596 rvu->vfs = vfs;
2597
2598 err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
2599 rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
2600 if (err)
2601 return err;
2602
2603 rvu_enable_afvf_intr(rvu);
2604 /* Make sure IRQs are enabled before SRIOV. */
2605 mb();
2606
2607 err = pci_enable_sriov(pdev, vfs);
2608 if (err) {
2609 rvu_disable_afvf_intr(rvu);
2610 rvu_mbox_destroy(&rvu->afvf_wq_info);
2611 return err;
2612 }
2613
2614 return 0;
2615 }
2616
2617 static void rvu_disable_sriov(struct rvu *rvu)
2618 {
2619 rvu_disable_afvf_intr(rvu);
2620 rvu_mbox_destroy(&rvu->afvf_wq_info);
2621 pci_disable_sriov(rvu->pdev);
2622 }
2623
2624 static void rvu_update_module_params(struct rvu *rvu)
2625 {
2626 const char *default_pfl_name = "default";
2627
2628 strscpy(rvu->mkex_pfl_name,
2629 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
2630 }
2631
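/* Probe path: enable and map the device, discover the implemented blocks,
 * reset and size the HW resources, bring up the AF<->PF mailbox, FLR
 * handling and interrupts, then enable AF's VFs and debugfs. The error
 * labels unwind in the reverse order.
 */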
2632 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2633 {
2634 struct device *dev = &pdev->dev;
2635 struct rvu *rvu;
2636 int err;
2637
2638 rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
2639 if (!rvu)
2640 return -ENOMEM;
2641
2642 rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
2643 if (!rvu->hw) {
2644 devm_kfree(dev, rvu);
2645 return -ENOMEM;
2646 }
2647
2648 pci_set_drvdata(pdev, rvu);
2649 rvu->pdev = pdev;
2650 rvu->dev = &pdev->dev;
2651
2652 err = pci_enable_device(pdev);
2653 if (err) {
2654 dev_err(dev, "Failed to enable PCI device\n");
2655 goto err_freemem;
2656 }
2657
2658 err = pci_request_regions(pdev, DRV_NAME);
2659 if (err) {
2660 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2661 goto err_disable_device;
2662 }
2663
2664 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2665 if (err) {
2666 dev_err(dev, "DMA mask config failed, abort\n");
2667 goto err_release_regions;
2668 }
2669
2670 pci_set_master(pdev);
2671
2672 rvu->ptp = ptp_get();
2673 if (IS_ERR(rvu->ptp)) {
2674 err = PTR_ERR(rvu->ptp);
2675 if (err == -EPROBE_DEFER)
2676 goto err_release_regions;
2677 rvu->ptp = NULL;
2678 }
2679
2680 /* Map Admin function CSRs */
2681 rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
2682 rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
2683 if (!rvu->afreg_base || !rvu->pfreg_base) {
2684 dev_err(dev, "Unable to map admin function CSRs, aborting\n");
2685 err = -ENOMEM;
2686 goto err_put_ptp;
2687 }
2688
2689 /* Store module params in rvu structure */
2690 rvu_update_module_params(rvu);
2691
2692 /* Check which blocks the HW supports */
2693 rvu_check_block_implemented(rvu);
2694
2695 rvu_reset_all_blocks(rvu);
2696
2697 rvu_setup_hw_capabilities(rvu);
2698
2699 err = rvu_setup_hw_resources(rvu);
2700 if (err)
2701 goto err_put_ptp;
2702
2703 /* Init mailbox btw AF and PFs */
2704 err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
2705 rvu->hw->total_pfs, rvu_afpf_mbox_handler,
2706 rvu_afpf_mbox_up_handler);
2707 if (err)
2708 goto err_hwsetup;
2709
2710 err = rvu_flr_init(rvu);
2711 if (err)
2712 goto err_mbox;
2713
2714 err = rvu_register_interrupts(rvu);
2715 if (err)
2716 goto err_flr;
2717
2718 rvu_setup_rvum_blk_revid(rvu);
2719
2720 /* Enable AF's VFs (if any) */
2721 err = rvu_enable_sriov(rvu);
2722 if (err)
2723 goto err_irq;
2724
2725 /* Initialize debugfs */
2726 rvu_dbg_init(rvu);
2727
2728 return 0;
2729 err_irq:
2730 rvu_unregister_interrupts(rvu);
2731 err_flr:
2732 rvu_flr_wq_destroy(rvu);
2733 err_mbox:
2734 rvu_mbox_destroy(&rvu->afpf_wq_info);
2735 err_hwsetup:
2736 rvu_cgx_exit(rvu);
2737 rvu_fwdata_exit(rvu);
2738 rvu_reset_all_blocks(rvu);
2739 rvu_free_hw_resources(rvu);
2740 rvu_clear_rvum_blk_revid(rvu);
2741 err_put_ptp:
2742 ptp_put(rvu->ptp);
2743 err_release_regions:
2744 pci_release_regions(pdev);
2745 err_disable_device:
2746 pci_disable_device(pdev);
2747 err_freemem:
2748 pci_set_drvdata(pdev, NULL);
2749 devm_kfree(&pdev->dev, rvu->hw);
2750 devm_kfree(dev, rvu);
2751 return err;
2752 }
2753
2754 static void rvu_remove(struct pci_dev *pdev)
2755 {
2756 struct rvu *rvu = pci_get_drvdata(pdev);
2757
2758 rvu_dbg_exit(rvu);
2759 rvu_unregister_interrupts(rvu);
2760 rvu_flr_wq_destroy(rvu);
2761 rvu_cgx_exit(rvu);
2762 rvu_fwdata_exit(rvu);
2763 rvu_mbox_destroy(&rvu->afpf_wq_info);
2764 rvu_disable_sriov(rvu);
2765 rvu_reset_all_blocks(rvu);
2766 rvu_free_hw_resources(rvu);
2767 rvu_clear_rvum_blk_revid(rvu);
2768 ptp_put(rvu->ptp);
2769 pci_release_regions(pdev);
2770 pci_disable_device(pdev);
2771 pci_set_drvdata(pdev, NULL);
2772
2773 devm_kfree(&pdev->dev, rvu->hw);
2774 devm_kfree(&pdev->dev, rvu);
2775 }
2776
2777 static struct pci_driver rvu_driver = {
2778 .name = DRV_NAME,
2779 .id_table = rvu_id_table,
2780 .probe = rvu_probe,
2781 .remove = rvu_remove,
2782 };
2783
2784 static int __init rvu_init_module(void)
2785 {
2786 int err;
2787
2788 pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
2789
2790 err = pci_register_driver(&cgx_driver);
2791 if (err < 0)
2792 return err;
2793
2794 err = pci_register_driver(&ptp_driver);
2795 if (err < 0)
2796 goto ptp_err;
2797
2798 err = pci_register_driver(&rvu_driver);
2799 if (err < 0)
2800 goto rvu_err;
2801
2802 return 0;
2803 rvu_err:
2804 pci_unregister_driver(&ptp_driver);
2805 ptp_err:
2806 pci_unregister_driver(&cgx_driver);
2807
2808 return err;
2809 }
2810
2811 static void __exit rvu_cleanup_module(void)
2812 {
2813 pci_unregister_driver(&rvu_driver);
2814 pci_unregister_driver(&ptp_driver);
2815 pci_unregister_driver(&cgx_driver);
2816 }
2817
2818 module_init(rvu_init_module);
2819 module_exit(rvu_cleanup_module);
2820