1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
21 			    int type, int chan_id);
22 
23 enum mc_tbl_sz {
24 	MC_TBL_SZ_256,
25 	MC_TBL_SZ_512,
26 	MC_TBL_SZ_1K,
27 	MC_TBL_SZ_2K,
28 	MC_TBL_SZ_4K,
29 	MC_TBL_SZ_8K,
30 	MC_TBL_SZ_16K,
31 	MC_TBL_SZ_32K,
32 	MC_TBL_SZ_64K,
33 };
34 
35 enum mc_buf_cnt {
36 	MC_BUF_CNT_8,
37 	MC_BUF_CNT_16,
38 	MC_BUF_CNT_32,
39 	MC_BUF_CNT_64,
40 	MC_BUF_CNT_128,
41 	MC_BUF_CNT_256,
42 	MC_BUF_CNT_512,
43 	MC_BUF_CNT_1024,
44 	MC_BUF_CNT_2048,
45 };
46 
47 enum nix_makr_fmt_indexes {
48 	NIX_MARK_CFG_IP_DSCP_RED,
49 	NIX_MARK_CFG_IP_DSCP_YELLOW,
50 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
51 	NIX_MARK_CFG_IP_ECN_RED,
52 	NIX_MARK_CFG_IP_ECN_YELLOW,
53 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
54 	NIX_MARK_CFG_VLAN_DEI_RED,
55 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
56 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
57 	NIX_MARK_CFG_MAX,
58 };
59 
60 /* For now, consider only the MC resources needed for broadcast
61  * pkt replication, i.e. 256 HWVFs + 12 PFs.
62  */
63 #define MC_TBL_SIZE	MC_TBL_SZ_512
64 #define MC_BUF_CNT	MC_BUF_CNT_128
65 
66 struct mce {
67 	struct hlist_node	node;
68 	u16			pcifunc;
69 };
70 
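/* Check whether a NIX LF is attached to this PF_FUNC and its NIX block
 * address can be resolved.
 */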
71 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
72 {
73 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
74 	int blkaddr;
75 
76 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
77 	if (!pfvf->nixlf || blkaddr < 0)
78 		return false;
79 	return true;
80 }
81 
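/* Return the maximum number of LFs supported by the NIX block,
 * or zero if no NIX block is present.
 */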
82 int rvu_get_nixlf_count(struct rvu *rvu)
83 {
84 	struct rvu_block *block;
85 	int blkaddr;
86 
87 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
88 	if (blkaddr < 0)
89 		return 0;
90 	block = &rvu->hw->block[blkaddr];
91 	return block->lf.max;
92 }
93 
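/* Get the NIX LF number attached to this PF_FUNC and, optionally,
 * the NIX block address it belongs to.
 */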
94 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
95 {
96 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
97 	struct rvu_hwinfo *hw = rvu->hw;
98 	int blkaddr;
99 
100 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
101 	if (!pfvf->nixlf || blkaddr < 0)
102 		return NIX_AF_ERR_AF_LF_INVALID;
103 
104 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
105 	if (*nixlf < 0)
106 		return NIX_AF_ERR_AF_LF_INVALID;
107 
108 	if (nix_blkaddr)
109 		*nix_blkaddr = blkaddr;
110 
111 	return 0;
112 }
113 
114 static void nix_mce_list_init(struct nix_mce_list *list, int max)
115 {
116 	INIT_HLIST_HEAD(&list->head);
117 	list->count = 0;
118 	list->max = max;
119 }
120 
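/* Reserve 'count' consecutive entries in the multicast entry (MCE)
 * table and return the index of the first reserved entry.
 */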
121 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
122 {
123 	int idx;
124 
125 	if (!mcast)
126 		return 0;
127 
128 	idx = mcast->next_free_mce;
129 	mcast->next_free_mce += count;
130 	return idx;
131 }
132 
133 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
134 {
135 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
136 		return hw->nix0;
137 
138 	return NULL;
139 }
140 
141 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
142 {
143 	int err;
144 
145 	/* Sync all in-flight RX packets to LLC/DRAM */
146 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
147 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
148 	if (err)
149 		dev_err(rvu->dev, "NIX RX software sync failed\n");
150 }
151 
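/* Check whether the given transmit scheduler queue at level 'lvl'
 * is valid and owned by this PF_FUNC.
 */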
152 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
153 			    int lvl, u16 pcifunc, u16 schq)
154 {
155 	struct rvu_hwinfo *hw = rvu->hw;
156 	struct nix_txsch *txsch;
157 	struct nix_hw *nix_hw;
158 	u16 map_func;
159 
160 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
161 	if (!nix_hw)
162 		return false;
163 
164 	txsch = &nix_hw->txsch[lvl];
165 	/* Check out of bounds */
166 	if (schq >= txsch->schq.max)
167 		return false;
168 
169 	mutex_lock(&rvu->rsrc_lock);
170 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
171 	mutex_unlock(&rvu->rsrc_lock);
172 
173 	/* TLs aggregating traffic are shared across PF and VFs */
174 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
175 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
176 			return false;
177 		else
178 			return true;
179 	}
180 
181 	if (map_func != pcifunc)
182 		return false;
183 
184 	return true;
185 }
186 
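/* Set up RX/TX channels and default MCAM entries (ucast, bcast, and
 * promisc for LBK) for a NIX LF attached to a CGX or LBK interface,
 * and add it to the bcast packet replication list.
 */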
187 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
188 {
189 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
190 	int pkind, pf, vf, lbkid;
191 	u8 cgx_id, lmac_id;
192 	int err;
193 
194 	pf = rvu_get_pf(pcifunc);
195 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
196 		return 0;
197 
198 	switch (type) {
199 	case NIX_INTF_TYPE_CGX:
200 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
201 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
202 
203 		pkind = rvu_npc_get_pkind(rvu, pf);
204 		if (pkind < 0) {
205 			dev_err(rvu->dev,
206 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
207 			return -EINVAL;
208 		}
209 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
210 		pfvf->tx_chan_base = pfvf->rx_chan_base;
211 		pfvf->rx_chan_cnt = 1;
212 		pfvf->tx_chan_cnt = 1;
213 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
214 		rvu_npc_set_pkind(rvu, pkind, pfvf);
215 
216 		/* By default we enable pause frames */
217 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
218 			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
219 					       lmac_id, true, true);
220 		break;
221 	case NIX_INTF_TYPE_LBK:
222 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
223 
224 		/* If the NIX1 block is present on the silicon, NIXes are
225 		 * assigned alternately to LBK interfaces. NIX0 should
226 		 * send packets on LBK link 1 channels and NIX1 should send
227 		 * on LBK link 0 channels for the communication between
228 		 * NIX0 and NIX1.
229 		 */
230 		lbkid = 0;
231 		if (rvu->hw->lbk_links > 1)
232 			lbkid = vf & 0x1 ? 0 : 1;
233 
234 		/* Note that AF's VFs work in pairs and talk over consecutive
235 		 * loopback channels. Therefore, if an odd number of AF VFs is
236 		 * enabled, the last VF remains without a pair.
237 		 */
238 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
239 		pfvf->tx_chan_base = vf & 0x1 ?
240 					NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
241 					NIX_CHAN_LBK_CHX(lbkid, vf + 1);
242 		pfvf->rx_chan_cnt = 1;
243 		pfvf->tx_chan_cnt = 1;
244 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
245 					      pfvf->rx_chan_base, false);
246 		break;
247 	}
248 
249 	/* Add a UCAST forwarding rule in MCAM for the MAC address of the
250 	 * RVU PF/VF to which this NIXLF is attached.
251 	 */
252 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
253 				    pfvf->rx_chan_base, pfvf->mac_addr);
254 
255 	/* Add this PF_FUNC to bcast pkt replication list */
256 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
257 	if (err) {
258 		dev_err(rvu->dev,
259 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
260 			pcifunc);
261 		return err;
262 	}
263 
264 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
265 					  nixlf, pfvf->rx_chan_base);
266 	pfvf->maxlen = NIC_HW_MIN_FRS;
267 	pfvf->minlen = NIC_HW_MIN_FRS;
268 
269 	return 0;
270 }
271 
272 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
273 {
274 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
275 	int err;
276 
277 	pfvf->maxlen = 0;
278 	pfvf->minlen = 0;
279 	pfvf->rxvlan = false;
280 
281 	/* Remove this PF_FUNC from bcast pkt replication list */
282 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
283 	if (err) {
284 		dev_err(rvu->dev,
285 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
286 			pcifunc);
287 	}
288 
289 	/* Free and disable any MCAM entries used by this NIX LF */
290 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
291 }
292 
293 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
294 				    struct nix_bp_cfg_req *req,
295 				    struct msg_rsp *rsp)
296 {
297 	u16 pcifunc = req->hdr.pcifunc;
298 	struct rvu_pfvf *pfvf;
299 	int blkaddr, pf, type;
300 	u16 chan_base, chan;
301 	u64 cfg;
302 
303 	pf = rvu_get_pf(pcifunc);
304 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
305 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
306 		return 0;
307 
308 	pfvf = rvu_get_pfvf(rvu, pcifunc);
309 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
310 
311 	chan_base = pfvf->rx_chan_base + req->chan_base;
312 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
313 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
314 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
315 			    cfg & ~BIT_ULL(16));
316 	}
317 	return 0;
318 }
319 
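/* Derive the backpressure ID (BPID) for a channel of a CGX or LBK
 * interface as per the BPID range division described below.
 */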
320 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
321 			    int type, int chan_id)
322 {
323 	int bpid, blkaddr, lmac_chan_cnt;
324 	struct rvu_hwinfo *hw = rvu->hw;
325 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
326 	struct rvu_pfvf *pfvf;
327 	u8 cgx_id, lmac_id;
328 	u64 cfg;
329 
330 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
331 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
332 	lmac_chan_cnt = cfg & 0xFF;
333 
334 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
335 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
336 
337 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
338 
339 	/* Backpressure ID range division:
340 	 * CGX channels are mapped to BPIDs (0 - 191)
341 	 * LBK channels are mapped to BPIDs (192 - 255)
342 	 * SDP channels are mapped to BPIDs (256 - 511)
343 	 *
344 	 * LMAC channels and BPIDs are mapped as follows:
345 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
346 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
347 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
348 	 */
349 	switch (type) {
350 	case NIX_INTF_TYPE_CGX:
351 		if ((req->chan_base + req->chan_cnt) > 15)
352 			return -EINVAL;
353 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
354 		/* Assign bpid based on cgx, lmac and chan id */
355 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
356 			(lmac_id * lmac_chan_cnt) + req->chan_base;
357 
358 		if (req->bpid_per_chan)
359 			bpid += chan_id;
360 		if (bpid > cgx_bpid_cnt)
361 			return -EINVAL;
362 		break;
363 
364 	case NIX_INTF_TYPE_LBK:
365 		if ((req->chan_base + req->chan_cnt) > 63)
366 			return -EINVAL;
367 		bpid = cgx_bpid_cnt + req->chan_base;
368 		if (req->bpid_per_chan)
369 			bpid += chan_id;
370 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
371 			return -EINVAL;
372 		break;
373 	default:
374 		return -EINVAL;
375 	}
376 	return bpid;
377 }
378 
379 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
380 				   struct nix_bp_cfg_req *req,
381 				   struct nix_bp_cfg_rsp *rsp)
382 {
383 	int blkaddr, pf, type, chan_id = 0;
384 	u16 pcifunc = req->hdr.pcifunc;
385 	struct rvu_pfvf *pfvf;
386 	u16 chan_base, chan;
387 	s16 bpid, bpid_base;
388 	u64 cfg;
389 
390 	pf = rvu_get_pf(pcifunc);
391 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
392 
393 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
394 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
395 		return 0;
396 
397 	pfvf = rvu_get_pfvf(rvu, pcifunc);
398 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
399 
400 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
401 	chan_base = pfvf->rx_chan_base + req->chan_base;
402 	bpid = bpid_base;
403 
404 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
405 		if (bpid < 0) {
406 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
407 			return -EINVAL;
408 		}
409 
410 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
411 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
412 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
413 		chan_id++;
414 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
415 	}
416 
417 	for (chan = 0; chan < req->chan_cnt; chan++) {
418 		/* Map channel to the bpid assigned to it */
419 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
420 					(bpid_base & 0x3FF);
421 		if (req->bpid_per_chan)
422 			bpid_base++;
423 	}
424 	rsp->chan_cnt = req->chan_cnt;
425 
426 	return 0;
427 }
428 
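/* Configure the outer L3 LSO format fields (IP length and, for IPv4,
 * the IP ID) used for TSO.
 */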
429 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
430 				 u64 format, bool v4, u64 *fidx)
431 {
432 	struct nix_lso_format field = {0};
433 
434 	/* IP's Length field */
435 	field.layer = NIX_TXLAYER_OL3;
436 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
437 	field.offset = v4 ? 2 : 4;
438 	field.sizem1 = 1; /* i.e. 2 bytes */
439 	field.alg = NIX_LSOALG_ADD_PAYLEN;
440 	rvu_write64(rvu, blkaddr,
441 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
442 		    *(u64 *)&field);
443 
444 	/* No ID field in IPv6 header */
445 	if (!v4)
446 		return;
447 
448 	/* IP's ID field */
449 	field.layer = NIX_TXLAYER_OL3;
450 	field.offset = 4;
451 	field.sizem1 = 1; /* i.e. 2 bytes */
452 	field.alg = NIX_LSOALG_ADD_SEGNUM;
453 	rvu_write64(rvu, blkaddr,
454 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
455 		    *(u64 *)&field);
456 }
457 
458 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
459 				 u64 format, u64 *fidx)
460 {
461 	struct nix_lso_format field = {0};
462 
463 	/* TCP's sequence number field */
464 	field.layer = NIX_TXLAYER_OL4;
465 	field.offset = 4;
466 	field.sizem1 = 3; /* i.e. 4 bytes */
467 	field.alg = NIX_LSOALG_ADD_OFFSET;
468 	rvu_write64(rvu, blkaddr,
469 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
470 		    *(u64 *)&field);
471 
472 	/* TCP's flags field */
473 	field.layer = NIX_TXLAYER_OL4;
474 	field.offset = 12;
475 	field.sizem1 = 1; /* 2 bytes */
476 	field.alg = NIX_LSOALG_TCP_FLAGS;
477 	rvu_write64(rvu, blkaddr,
478 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
479 		    *(u64 *)&field);
480 }
481 
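/* Enable LSO in hardware and set up the default static LSO formats
 * for TCPv4 and TCPv6 segmentation offload.
 */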
482 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
483 {
484 	u64 cfg, idx, fidx = 0;
485 
486 	/* Get max HW supported format indices */
487 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
488 	nix_hw->lso.total = cfg;
489 
490 	/* Enable LSO */
491 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
492 	/* For TSO, set first and middle segment flags to
493 	 * mask out PSH, RST & FIN flags in TCP packet
494 	 */
495 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
496 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
497 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
498 
499 	/* Setup default static LSO formats
500 	 *
501 	 * Configure format fields for TCPv4 segmentation offload
502 	 */
503 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
504 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
505 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
506 
507 	/* Set rest of the fields to NOP */
508 	for (; fidx < 8; fidx++) {
509 		rvu_write64(rvu, blkaddr,
510 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
511 	}
512 	nix_hw->lso.in_use++;
513 
514 	/* Configure format fields for TCPv6 segmentation offload */
515 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
516 	fidx = 0;
517 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
518 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
519 
520 	/* Set rest of the fields to NOP */
521 	for (; fidx < 8; fidx++) {
522 		rvu_write64(rvu, blkaddr,
523 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
524 	}
525 	nix_hw->lso.in_use++;
526 }
527 
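/* Free the queue bitmaps and all HW context memory allocated
 * for this NIX LF.
 */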
528 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
529 {
530 	kfree(pfvf->rq_bmap);
531 	kfree(pfvf->sq_bmap);
532 	kfree(pfvf->cq_bmap);
533 	if (pfvf->rq_ctx)
534 		qmem_free(rvu->dev, pfvf->rq_ctx);
535 	if (pfvf->sq_ctx)
536 		qmem_free(rvu->dev, pfvf->sq_ctx);
537 	if (pfvf->cq_ctx)
538 		qmem_free(rvu->dev, pfvf->cq_ctx);
539 	if (pfvf->rss_ctx)
540 		qmem_free(rvu->dev, pfvf->rss_ctx);
541 	if (pfvf->nix_qints_ctx)
542 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
543 	if (pfvf->cq_ints_ctx)
544 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
545 
546 	pfvf->rq_bmap = NULL;
547 	pfvf->cq_bmap = NULL;
548 	pfvf->sq_bmap = NULL;
549 	pfvf->rq_ctx = NULL;
550 	pfvf->sq_ctx = NULL;
551 	pfvf->cq_ctx = NULL;
552 	pfvf->rss_ctx = NULL;
553 	pfvf->nix_qints_ctx = NULL;
554 	pfvf->cq_ints_ctx = NULL;
555 }
556 
557 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
558 			      struct rvu_pfvf *pfvf, int nixlf,
559 			      int rss_sz, int rss_grps, int hwctx_size,
560 			      u64 way_mask)
561 {
562 	int err, grp, num_indices;
563 
564 	/* RSS is not requested for this NIXLF */
565 	if (!rss_sz)
566 		return 0;
567 	num_indices = rss_sz * rss_grps;
568 
569 	/* Alloc NIX RSS HW context memory and config the base */
570 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
571 	if (err)
572 		return err;
573 
574 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
575 		    (u64)pfvf->rss_ctx->iova);
576 
577 	/* Config full RSS table size, enable RSS and caching */
578 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
579 		    BIT_ULL(36) | BIT_ULL(4) |
580 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
581 		    way_mask << 20);
582 	/* Config RSS group offset and sizes */
583 	for (grp = 0; grp < rss_grps; grp++)
584 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
585 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
586 	return 0;
587 }
588 
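/* Enqueue a single instruction to the NIX admin queue, ring the
 * doorbell and poll until hardware posts a completion code.
 */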
589 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
590 			       struct nix_aq_inst_s *inst)
591 {
592 	struct admin_queue *aq = block->aq;
593 	struct nix_aq_res_s *result;
594 	int timeout = 1000;
595 	u64 reg, head;
596 
597 	result = (struct nix_aq_res_s *)aq->res->base;
598 
599 	/* Get current head pointer where to append this instruction */
600 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
601 	head = (reg >> 4) & AQ_PTR_MASK;
602 
603 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
604 	       (void *)inst, aq->inst->entry_sz);
605 	memset(result, 0, sizeof(*result));
606 	/* sync into memory */
607 	wmb();
608 
609 	/* Ring the doorbell and wait for result */
610 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
611 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
612 		cpu_relax();
613 		udelay(1);
614 		timeout--;
615 		if (!timeout)
616 			return -EBUSY;
617 	}
618 
619 	if (result->compcode != NIX_AQ_COMP_GOOD)
620 		/* TODO: Replace this with some error code */
621 		return -EBUSY;
622 
623 	return 0;
624 }
625 
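/* Validate an AQ request from a PF/VF, submit it to the NIX admin
 * queue, track RQ/SQ/CQ enable state in the LF's bitmaps and copy the
 * context back into the mailbox response for READ operations.
 */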
626 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
627 			       struct nix_aq_enq_rsp *rsp)
628 {
629 	struct rvu_hwinfo *hw = rvu->hw;
630 	u16 pcifunc = req->hdr.pcifunc;
631 	int nixlf, blkaddr, rc = 0;
632 	struct nix_aq_inst_s inst;
633 	struct rvu_block *block;
634 	struct admin_queue *aq;
635 	struct rvu_pfvf *pfvf;
636 	void *ctx, *mask;
637 	bool ena;
638 	u64 cfg;
639 
640 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
641 	if (blkaddr < 0)
642 		return NIX_AF_ERR_AF_LF_INVALID;
643 
644 	block = &hw->block[blkaddr];
645 	aq = block->aq;
646 	if (!aq) {
647 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
648 		return NIX_AF_ERR_AQ_ENQUEUE;
649 	}
650 
651 	pfvf = rvu_get_pfvf(rvu, pcifunc);
652 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
653 
654 	/* Skip NIXLF check for broadcast MCE entry init */
655 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
656 		if (!pfvf->nixlf || nixlf < 0)
657 			return NIX_AF_ERR_AF_LF_INVALID;
658 	}
659 
660 	switch (req->ctype) {
661 	case NIX_AQ_CTYPE_RQ:
662 		/* Check if index exceeds max no of queues */
663 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
664 			rc = NIX_AF_ERR_AQ_ENQUEUE;
665 		break;
666 	case NIX_AQ_CTYPE_SQ:
667 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
668 			rc = NIX_AF_ERR_AQ_ENQUEUE;
669 		break;
670 	case NIX_AQ_CTYPE_CQ:
671 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
672 			rc = NIX_AF_ERR_AQ_ENQUEUE;
673 		break;
674 	case NIX_AQ_CTYPE_RSS:
675 		/* Check if RSS is enabled and qidx is within range */
676 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
677 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
678 		    (req->qidx >= (256UL << (cfg & 0xF))))
679 			rc = NIX_AF_ERR_AQ_ENQUEUE;
680 		break;
681 	case NIX_AQ_CTYPE_MCE:
682 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
683 		/* Check if index exceeds MCE list length */
684 		if (!hw->nix0->mcast.mce_ctx ||
685 		    (req->qidx >= (256UL << (cfg & 0xF))))
686 			rc = NIX_AF_ERR_AQ_ENQUEUE;
687 
688 		/* Adding multicast lists for requests from PF/VFs is not
689 		 * yet supported, so ignore this.
690 		 */
691 		if (rsp)
692 			rc = NIX_AF_ERR_AQ_ENQUEUE;
693 		break;
694 	default:
695 		rc = NIX_AF_ERR_AQ_ENQUEUE;
696 	}
697 
698 	if (rc)
699 		return rc;
700 
701 	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
702 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
703 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
704 	     (req->op == NIX_AQ_INSTOP_WRITE &&
705 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
706 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
707 				     pcifunc, req->sq.smq))
708 			return NIX_AF_ERR_AQ_ENQUEUE;
709 	}
710 
711 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
712 	inst.lf = nixlf;
713 	inst.cindex = req->qidx;
714 	inst.ctype = req->ctype;
715 	inst.op = req->op;
716 	/* Currently we do not support enqueuing multiple instructions,
717 	 * so always choose the first entry in result memory.
718 	 */
719 	inst.res_addr = (u64)aq->res->iova;
720 
721 	/* Hardware uses the same aq->res->base for updating the result of
722 	 * the previous instruction, hence wait here till it is done.
723 	 */
724 	spin_lock(&aq->lock);
725 
726 	/* Clean result + context memory */
727 	memset(aq->res->base, 0, aq->res->entry_sz);
728 	/* Context needs to be written at RES_ADDR + 128 */
729 	ctx = aq->res->base + 128;
730 	/* Mask needs to be written at RES_ADDR + 256 */
731 	mask = aq->res->base + 256;
732 
733 	switch (req->op) {
734 	case NIX_AQ_INSTOP_WRITE:
735 		if (req->ctype == NIX_AQ_CTYPE_RQ)
736 			memcpy(mask, &req->rq_mask,
737 			       sizeof(struct nix_rq_ctx_s));
738 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
739 			memcpy(mask, &req->sq_mask,
740 			       sizeof(struct nix_sq_ctx_s));
741 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
742 			memcpy(mask, &req->cq_mask,
743 			       sizeof(struct nix_cq_ctx_s));
744 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
745 			memcpy(mask, &req->rss_mask,
746 			       sizeof(struct nix_rsse_s));
747 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
748 			memcpy(mask, &req->mce_mask,
749 			       sizeof(struct nix_rx_mce_s));
750 		fallthrough;
751 	case NIX_AQ_INSTOP_INIT:
752 		if (req->ctype == NIX_AQ_CTYPE_RQ)
753 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
754 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
755 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
756 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
757 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
758 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
759 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
760 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
761 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
762 		break;
763 	case NIX_AQ_INSTOP_NOP:
764 	case NIX_AQ_INSTOP_READ:
765 	case NIX_AQ_INSTOP_LOCK:
766 	case NIX_AQ_INSTOP_UNLOCK:
767 		break;
768 	default:
769 		rc = NIX_AF_ERR_AQ_ENQUEUE;
770 		spin_unlock(&aq->lock);
771 		return rc;
772 	}
773 
774 	/* Submit the instruction to AQ */
775 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
776 	if (rc) {
777 		spin_unlock(&aq->lock);
778 		return rc;
779 	}
780 
781 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
782 	if (req->op == NIX_AQ_INSTOP_INIT) {
783 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
784 			__set_bit(req->qidx, pfvf->rq_bmap);
785 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
786 			__set_bit(req->qidx, pfvf->sq_bmap);
787 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
788 			__set_bit(req->qidx, pfvf->cq_bmap);
789 	}
790 
791 	if (req->op == NIX_AQ_INSTOP_WRITE) {
792 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
793 			ena = (req->rq.ena & req->rq_mask.ena) |
794 				(test_bit(req->qidx, pfvf->rq_bmap) &
795 				~req->rq_mask.ena);
796 			if (ena)
797 				__set_bit(req->qidx, pfvf->rq_bmap);
798 			else
799 				__clear_bit(req->qidx, pfvf->rq_bmap);
800 		}
801 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
802 			ena = (req->rq.ena & req->sq_mask.ena) |
803 				(test_bit(req->qidx, pfvf->sq_bmap) &
804 				~req->sq_mask.ena);
805 			if (ena)
806 				__set_bit(req->qidx, pfvf->sq_bmap);
807 			else
808 				__clear_bit(req->qidx, pfvf->sq_bmap);
809 		}
810 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
811 			ena = (req->rq.ena & req->cq_mask.ena) |
812 				(test_bit(req->qidx, pfvf->cq_bmap) &
813 				~req->cq_mask.ena);
814 			if (ena)
815 				__set_bit(req->qidx, pfvf->cq_bmap);
816 			else
817 				__clear_bit(req->qidx, pfvf->cq_bmap);
818 		}
819 	}
820 
821 	if (rsp) {
822 		/* Copy read context into mailbox */
823 		if (req->op == NIX_AQ_INSTOP_READ) {
824 			if (req->ctype == NIX_AQ_CTYPE_RQ)
825 				memcpy(&rsp->rq, ctx,
826 				       sizeof(struct nix_rq_ctx_s));
827 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
828 				memcpy(&rsp->sq, ctx,
829 				       sizeof(struct nix_sq_ctx_s));
830 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
831 				memcpy(&rsp->cq, ctx,
832 				       sizeof(struct nix_cq_ctx_s));
833 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
834 				memcpy(&rsp->rss, ctx,
835 				       sizeof(struct nix_rsse_s));
836 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
837 				memcpy(&rsp->mce, ctx,
838 				       sizeof(struct nix_rx_mce_s));
839 		}
840 	}
841 
842 	spin_unlock(&aq->lock);
843 	return 0;
844 }
845 
846 static const char *nix_get_ctx_name(int ctype)
847 {
848 	switch (ctype) {
849 	case NIX_AQ_CTYPE_CQ:
850 		return "CQ";
851 	case NIX_AQ_CTYPE_SQ:
852 		return "SQ";
853 	case NIX_AQ_CTYPE_RQ:
854 		return "RQ";
855 	case NIX_AQ_CTYPE_RSS:
856 		return "RSS";
857 	}
858 	return "";
859 }
860 
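/* Disable all enabled RQ/SQ/CQ hardware contexts of an LF by issuing
 * AQ WRITE instructions with the ENA bit cleared.
 */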
861 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
862 {
863 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
864 	struct nix_aq_enq_req aq_req;
865 	unsigned long *bmap;
866 	int qidx, q_cnt = 0;
867 	int err = 0, rc;
868 
869 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
870 		return NIX_AF_ERR_AQ_ENQUEUE;
871 
872 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
873 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
874 
875 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
876 		aq_req.cq.ena = 0;
877 		aq_req.cq_mask.ena = 1;
878 		aq_req.cq.bp_ena = 0;
879 		aq_req.cq_mask.bp_ena = 1;
880 		q_cnt = pfvf->cq_ctx->qsize;
881 		bmap = pfvf->cq_bmap;
882 	}
883 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
884 		aq_req.sq.ena = 0;
885 		aq_req.sq_mask.ena = 1;
886 		q_cnt = pfvf->sq_ctx->qsize;
887 		bmap = pfvf->sq_bmap;
888 	}
889 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
890 		aq_req.rq.ena = 0;
891 		aq_req.rq_mask.ena = 1;
892 		q_cnt = pfvf->rq_ctx->qsize;
893 		bmap = pfvf->rq_bmap;
894 	}
895 
896 	aq_req.ctype = req->ctype;
897 	aq_req.op = NIX_AQ_INSTOP_WRITE;
898 
899 	for (qidx = 0; qidx < q_cnt; qidx++) {
900 		if (!test_bit(qidx, bmap))
901 			continue;
902 		aq_req.qidx = qidx;
903 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
904 		if (rc) {
905 			err = rc;
906 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
907 				nix_get_ctx_name(req->ctype), qidx);
908 		}
909 	}
910 
911 	return err;
912 }
913 
914 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
915 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
916 {
917 	struct nix_aq_enq_req lock_ctx_req;
918 	int err;
919 
920 	if (req->op != NIX_AQ_INSTOP_INIT)
921 		return 0;
922 
923 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
924 	    req->ctype == NIX_AQ_CTYPE_DYNO)
925 		return 0;
926 
927 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
928 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
929 	lock_ctx_req.ctype = req->ctype;
930 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
931 	lock_ctx_req.qidx = req->qidx;
932 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
933 	if (err)
934 		dev_err(rvu->dev,
935 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
936 			req->hdr.pcifunc,
937 			nix_get_ctx_name(req->ctype), req->qidx);
938 	return err;
939 }
940 
941 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
942 				struct nix_aq_enq_req *req,
943 				struct nix_aq_enq_rsp *rsp)
944 {
945 	int err;
946 
947 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
948 	if (!err)
949 		err = nix_lf_hwctx_lockdown(rvu, req);
950 	return err;
951 }
952 #else
953 
954 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
955 				struct nix_aq_enq_req *req,
956 				struct nix_aq_enq_rsp *rsp)
957 {
958 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
959 }
960 #endif
961 
962 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
963 				       struct hwctx_disable_req *req,
964 				       struct msg_rsp *rsp)
965 {
966 	return nix_lf_hwctx_disable(rvu, req);
967 }
968 
969 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
970 				  struct nix_lf_alloc_req *req,
971 				  struct nix_lf_alloc_rsp *rsp)
972 {
973 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
974 	struct rvu_hwinfo *hw = rvu->hw;
975 	u16 pcifunc = req->hdr.pcifunc;
976 	struct rvu_block *block;
977 	struct rvu_pfvf *pfvf;
978 	u64 cfg, ctx_cfg;
979 	int blkaddr;
980 
981 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
982 		return NIX_AF_ERR_PARAM;
983 
984 	if (req->way_mask)
985 		req->way_mask &= 0xFFFF;
986 
987 	pfvf = rvu_get_pfvf(rvu, pcifunc);
988 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
989 	if (!pfvf->nixlf || blkaddr < 0)
990 		return NIX_AF_ERR_AF_LF_INVALID;
991 
992 	block = &hw->block[blkaddr];
993 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
994 	if (nixlf < 0)
995 		return NIX_AF_ERR_AF_LF_INVALID;
996 
997 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
998 	if (req->npa_func) {
999 		/* If default, use 'this' NIXLF's PFFUNC */
1000 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1001 			req->npa_func = pcifunc;
1002 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1003 			return NIX_AF_INVAL_NPA_PF_FUNC;
1004 	}
1005 
1006 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1007 	if (req->sso_func) {
1008 		/* If default, use 'this' NIXLF's PFFUNC */
1009 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1010 			req->sso_func = pcifunc;
1011 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1012 			return NIX_AF_INVAL_SSO_PF_FUNC;
1013 	}
1014 
1015 	/* If RSS is being enabled, check if the requested config is valid.
1016 	 * RSS table size should be a power of two, otherwise
1017 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1018 	 * the entire table won't be usable.
1019 	 */
1020 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1021 			    !is_power_of_2(req->rss_sz)))
1022 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1023 
1024 	if (req->rss_sz &&
1025 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1026 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1027 
1028 	/* Reset this NIX LF */
1029 	err = rvu_lf_reset(rvu, block, nixlf);
1030 	if (err) {
1031 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1032 			block->addr - BLKADDR_NIX0, nixlf);
1033 		return NIX_AF_ERR_LF_RESET;
1034 	}
1035 
1036 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1037 
1038 	/* Alloc NIX RQ HW context memory and config the base */
1039 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1040 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1041 	if (err)
1042 		goto free_mem;
1043 
1044 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1045 	if (!pfvf->rq_bmap)
1046 		goto free_mem;
1047 
1048 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1049 		    (u64)pfvf->rq_ctx->iova);
1050 
1051 	/* Set caching and queue count in HW */
1052 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1053 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1054 
1055 	/* Alloc NIX SQ HW context memory and config the base */
1056 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1057 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1058 	if (err)
1059 		goto free_mem;
1060 
1061 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1062 	if (!pfvf->sq_bmap)
1063 		goto free_mem;
1064 
1065 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1066 		    (u64)pfvf->sq_ctx->iova);
1067 
1068 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1069 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1070 
1071 	/* Alloc NIX CQ HW context memory and config the base */
1072 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1073 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1074 	if (err)
1075 		goto free_mem;
1076 
1077 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1078 	if (!pfvf->cq_bmap)
1079 		goto free_mem;
1080 
1081 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1082 		    (u64)pfvf->cq_ctx->iova);
1083 
1084 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1085 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1086 
1087 	/* Initialize receive side scaling (RSS) */
1088 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1089 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1090 				 req->rss_grps, hwctx_size, req->way_mask);
1091 	if (err)
1092 		goto free_mem;
1093 
1094 	/* Alloc memory for CQINT's HW contexts */
1095 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1096 	qints = (cfg >> 24) & 0xFFF;
1097 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1098 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1099 	if (err)
1100 		goto free_mem;
1101 
1102 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1103 		    (u64)pfvf->cq_ints_ctx->iova);
1104 
1105 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1106 		    BIT_ULL(36) | req->way_mask << 20);
1107 
1108 	/* Alloc memory for QINT's HW contexts */
1109 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1110 	qints = (cfg >> 12) & 0xFFF;
1111 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1112 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1113 	if (err)
1114 		goto free_mem;
1115 
1116 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1117 		    (u64)pfvf->nix_qints_ctx->iova);
1118 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1119 		    BIT_ULL(36) | req->way_mask << 20);
1120 
1121 	/* Set up VLANX TPIDs.
1122 	 * Use VLAN1 for 802.1Q
1123 	 * and VLAN0 for 802.1AD.
1124 	 */
1125 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1126 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1127 
1128 	/* Enable LMTST for this NIX LF */
1129 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1130 
1131 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1132 	if (req->npa_func)
1133 		cfg = req->npa_func;
1134 	if (req->sso_func)
1135 		cfg |= (u64)req->sso_func << 16;
1136 
1137 	cfg |= (u64)req->xqe_sz << 33;
1138 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1139 
1140 	/* Config Rx pkt length, csum checks and apad enable/disable */
1141 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1142 
1143 	/* Configure pkind for TX parse config */
1144 	cfg = NPC_TX_DEF_PKIND;
1145 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1146 
1147 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1148 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1149 	if (err)
1150 		goto free_mem;
1151 
1152 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1153 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1154 
1155 	goto exit;
1156 
1157 free_mem:
1158 	nix_ctx_free(rvu, pfvf);
1159 	rc = -ENOMEM;
1160 
1161 exit:
1162 	/* Set macaddr of this PF/VF */
1163 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1164 
1165 	/* set SQB size info */
1166 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1167 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1168 	rsp->rx_chan_base = pfvf->rx_chan_base;
1169 	rsp->tx_chan_base = pfvf->tx_chan_base;
1170 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1171 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1172 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1173 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1174 	/* Get HW supported stat count */
1175 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1176 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1177 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1178 	/* Get count of CQ IRQs and error IRQs supported per LF */
1179 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1180 	rsp->qints = ((cfg >> 12) & 0xFFF);
1181 	rsp->cints = ((cfg >> 24) & 0xFFF);
1182 	return rc;
1183 }
1184 
1185 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
1186 				 struct msg_rsp *rsp)
1187 {
1188 	struct rvu_hwinfo *hw = rvu->hw;
1189 	u16 pcifunc = req->hdr.pcifunc;
1190 	struct rvu_block *block;
1191 	int blkaddr, nixlf, err;
1192 	struct rvu_pfvf *pfvf;
1193 
1194 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1195 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1196 	if (!pfvf->nixlf || blkaddr < 0)
1197 		return NIX_AF_ERR_AF_LF_INVALID;
1198 
1199 	block = &hw->block[blkaddr];
1200 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1201 	if (nixlf < 0)
1202 		return NIX_AF_ERR_AF_LF_INVALID;
1203 
1204 	nix_interface_deinit(rvu, pcifunc, nixlf);
1205 
1206 	/* Reset this NIX LF */
1207 	err = rvu_lf_reset(rvu, block, nixlf);
1208 	if (err) {
1209 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1210 			block->addr - BLKADDR_NIX0, nixlf);
1211 		return NIX_AF_ERR_LF_RESET;
1212 	}
1213 
1214 	nix_ctx_free(rvu, pfvf);
1215 
1216 	return 0;
1217 }
1218 
1219 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1220 					 struct nix_mark_format_cfg  *req,
1221 					 struct nix_mark_format_cfg_rsp *rsp)
1222 {
1223 	u16 pcifunc = req->hdr.pcifunc;
1224 	struct nix_hw *nix_hw;
1225 	struct rvu_pfvf *pfvf;
1226 	int blkaddr, rc;
1227 	u32 cfg;
1228 
1229 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1230 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1231 	if (!pfvf->nixlf || blkaddr < 0)
1232 		return NIX_AF_ERR_AF_LF_INVALID;
1233 
1234 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1235 	if (!nix_hw)
1236 		return -EINVAL;
1237 
1238 	cfg = (((u32)req->offset & 0x7) << 16) |
1239 	      (((u32)req->y_mask & 0xF) << 12) |
1240 	      (((u32)req->y_val & 0xF) << 8) |
1241 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1242 
1243 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1244 	if (rc < 0) {
1245 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1246 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1247 		return NIX_AF_ERR_MARK_CFG_FAIL;
1248 	}
1249 
1250 	rsp->mark_format_idx = rc;
1251 	return 0;
1252 }
1253 
1254 /* Disable shaping of pkts by a scheduler queue
1255  * at a given scheduler level.
1256  */
1257 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1258 				 int lvl, int schq)
1259 {
1260 	u64  cir_reg = 0, pir_reg = 0;
1261 	u64  cfg;
1262 
1263 	switch (lvl) {
1264 	case NIX_TXSCH_LVL_TL1:
1265 		cir_reg = NIX_AF_TL1X_CIR(schq);
1266 		pir_reg = 0; /* PIR not available at TL1 */
1267 		break;
1268 	case NIX_TXSCH_LVL_TL2:
1269 		cir_reg = NIX_AF_TL2X_CIR(schq);
1270 		pir_reg = NIX_AF_TL2X_PIR(schq);
1271 		break;
1272 	case NIX_TXSCH_LVL_TL3:
1273 		cir_reg = NIX_AF_TL3X_CIR(schq);
1274 		pir_reg = NIX_AF_TL3X_PIR(schq);
1275 		break;
1276 	case NIX_TXSCH_LVL_TL4:
1277 		cir_reg = NIX_AF_TL4X_CIR(schq);
1278 		pir_reg = NIX_AF_TL4X_PIR(schq);
1279 		break;
1280 	}
1281 
1282 	if (!cir_reg)
1283 		return;
1284 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1285 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1286 
1287 	if (!pir_reg)
1288 		return;
1289 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1290 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1291 }
1292 
1293 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1294 				 int lvl, int schq)
1295 {
1296 	struct rvu_hwinfo *hw = rvu->hw;
1297 	int link;
1298 
1299 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1300 		return;
1301 
1302 	/* Reset TL4's SDP link config */
1303 	if (lvl == NIX_TXSCH_LVL_TL4)
1304 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1305 
1306 	if (lvl != NIX_TXSCH_LVL_TL2)
1307 		return;
1308 
1309 	/* Reset TL2's CGX or LBK link config */
1310 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1311 		rvu_write64(rvu, blkaddr,
1312 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1313 }
1314 
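/* Return the transmit link index for this PF_FUNC: CGX links come
 * first, followed by the LBK links and then the SDP link.
 */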
1315 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1316 {
1317 	struct rvu_hwinfo *hw = rvu->hw;
1318 	int pf = rvu_get_pf(pcifunc);
1319 	u8 cgx_id = 0, lmac_id = 0;
1320 
1321 	if (is_afvf(pcifunc)) {/* LBK links */
1322 		return hw->cgx_links;
1323 	} else if (is_pf_cgxmapped(rvu, pf)) {
1324 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1325 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1326 	}
1327 
1328 	/* SDP link */
1329 	return hw->cgx_links + hw->lbk_links;
1330 }
1331 
1332 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1333 				 int link, int *start, int *end)
1334 {
1335 	struct rvu_hwinfo *hw = rvu->hw;
1336 	int pf = rvu_get_pf(pcifunc);
1337 
1338 	if (is_afvf(pcifunc)) { /* LBK links */
1339 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1340 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1341 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1342 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1343 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1344 	} else { /* SDP link */
1345 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1346 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1347 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1348 	}
1349 }
1350 
1351 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1352 				      struct nix_hw *nix_hw,
1353 				      struct nix_txsch_alloc_req *req)
1354 {
1355 	struct rvu_hwinfo *hw = rvu->hw;
1356 	int schq, req_schq, free_cnt;
1357 	struct nix_txsch *txsch;
1358 	int link, start, end;
1359 
1360 	txsch = &nix_hw->txsch[lvl];
1361 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1362 
1363 	if (!req_schq)
1364 		return 0;
1365 
1366 	link = nix_get_tx_link(rvu, pcifunc);
1367 
1368 	/* For traffic aggregating scheduler level, one queue is enough */
1369 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1370 		if (req_schq != 1)
1371 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1372 		return 0;
1373 	}
1374 
1375 	/* Get free SCHQ count and check if request can be accommodated */
1376 	if (hw->cap.nix_fixed_txschq_mapping) {
1377 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1378 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1379 		if (end <= txsch->schq.max && schq < end &&
1380 		    !test_bit(schq, txsch->schq.bmap))
1381 			free_cnt = 1;
1382 		else
1383 			free_cnt = 0;
1384 	} else {
1385 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1386 	}
1387 
1388 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1389 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1390 
1391 	/* If contiguous queues are needed, check for availability */
1392 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1393 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1394 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1395 
1396 	return 0;
1397 }
1398 
1399 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1400 			    struct nix_txsch_alloc_rsp *rsp,
1401 			    int lvl, int start, int end)
1402 {
1403 	struct rvu_hwinfo *hw = rvu->hw;
1404 	u16 pcifunc = rsp->hdr.pcifunc;
1405 	int idx, schq;
1406 
1407 	/* For traffic aggregating levels, queue alloc is based
1408 	 * on the transmit link to which the PF_FUNC is mapped.
1409 	 */
1410 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1411 		/* A single TL queue is allocated */
1412 		if (rsp->schq_contig[lvl]) {
1413 			rsp->schq_contig[lvl] = 1;
1414 			rsp->schq_contig_list[lvl][0] = start;
1415 		}
1416 
1417 		/* Both contig and non-contig reqs don't make sense here */
1418 		if (rsp->schq_contig[lvl])
1419 			rsp->schq[lvl] = 0;
1420 
1421 		if (rsp->schq[lvl]) {
1422 			rsp->schq[lvl] = 1;
1423 			rsp->schq_list[lvl][0] = start;
1424 		}
1425 		return;
1426 	}
1427 
1428 	/* Adjust the queue request count if HW supports
1429 	 * only one queue per level configuration.
1430 	 */
1431 	if (hw->cap.nix_fixed_txschq_mapping) {
1432 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1433 		schq = start + idx;
1434 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1435 			rsp->schq_contig[lvl] = 0;
1436 			rsp->schq[lvl] = 0;
1437 			return;
1438 		}
1439 
1440 		if (rsp->schq_contig[lvl]) {
1441 			rsp->schq_contig[lvl] = 1;
1442 			set_bit(schq, txsch->schq.bmap);
1443 			rsp->schq_contig_list[lvl][0] = schq;
1444 			rsp->schq[lvl] = 0;
1445 		} else if (rsp->schq[lvl]) {
1446 			rsp->schq[lvl] = 1;
1447 			set_bit(schq, txsch->schq.bmap);
1448 			rsp->schq_list[lvl][0] = schq;
1449 		}
1450 		return;
1451 	}
1452 
1453 	/* Allocate the requested contiguous queue indices first */
1454 	if (rsp->schq_contig[lvl]) {
1455 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1456 						  txsch->schq.max, start,
1457 						  rsp->schq_contig[lvl], 0);
1458 		if (schq >= end)
1459 			rsp->schq_contig[lvl] = 0;
1460 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1461 			set_bit(schq, txsch->schq.bmap);
1462 			rsp->schq_contig_list[lvl][idx] = schq;
1463 			schq++;
1464 		}
1465 	}
1466 
1467 	/* Allocate non-contiguous queue indices */
1468 	if (rsp->schq[lvl]) {
1469 		idx = 0;
1470 		for (schq = start; schq < end; schq++) {
1471 			if (!test_bit(schq, txsch->schq.bmap)) {
1472 				set_bit(schq, txsch->schq.bmap);
1473 				rsp->schq_list[lvl][idx++] = schq;
1474 			}
1475 			if (idx == rsp->schq[lvl])
1476 				break;
1477 		}
1478 		/* Update how many were allocated */
1479 		rsp->schq[lvl] = idx;
1480 	}
1481 }
1482 
1483 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1484 				     struct nix_txsch_alloc_req *req,
1485 				     struct nix_txsch_alloc_rsp *rsp)
1486 {
1487 	struct rvu_hwinfo *hw = rvu->hw;
1488 	u16 pcifunc = req->hdr.pcifunc;
1489 	int link, blkaddr, rc = 0;
1490 	int lvl, idx, start, end;
1491 	struct nix_txsch *txsch;
1492 	struct rvu_pfvf *pfvf;
1493 	struct nix_hw *nix_hw;
1494 	u32 *pfvf_map;
1495 	u16 schq;
1496 
1497 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1498 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1499 	if (!pfvf->nixlf || blkaddr < 0)
1500 		return NIX_AF_ERR_AF_LF_INVALID;
1501 
1502 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1503 	if (!nix_hw)
1504 		return -EINVAL;
1505 
1506 	mutex_lock(&rvu->rsrc_lock);
1507 
1508 	/* Check if the request is valid as per HW capabilities
1509 	 * and can be accommodated.
1510 	 */
1511 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1512 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1513 		if (rc)
1514 			goto err;
1515 	}
1516 
1517 	/* Allocate requested Tx scheduler queues */
1518 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1519 		txsch = &nix_hw->txsch[lvl];
1520 		pfvf_map = txsch->pfvf_map;
1521 
1522 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1523 			continue;
1524 
1525 		rsp->schq[lvl] = req->schq[lvl];
1526 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1527 
1528 		link = nix_get_tx_link(rvu, pcifunc);
1529 
1530 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1531 			start = link;
1532 			end = link;
1533 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1534 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1535 		} else {
1536 			start = 0;
1537 			end = txsch->schq.max;
1538 		}
1539 
1540 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1541 
1542 		/* Reset queue config */
1543 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1544 			schq = rsp->schq_contig_list[lvl][idx];
1545 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1546 			    NIX_TXSCHQ_CFG_DONE))
1547 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1548 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1549 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1550 		}
1551 
1552 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1553 			schq = rsp->schq_list[lvl][idx];
1554 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1555 			    NIX_TXSCHQ_CFG_DONE))
1556 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1557 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1558 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1559 		}
1560 	}
1561 
1562 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1563 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1564 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1565 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1566 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1567 	goto exit;
1568 err:
1569 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1570 exit:
1571 	mutex_unlock(&rvu->rsrc_lock);
1572 	return rc;
1573 }
1574 
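/* Flush an SMQ: temporarily enable CGX transmit and disable link
 * backpressure so pending packets can drain, then restore the
 * previous state.
 */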
1575 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1576 			  int smq, u16 pcifunc, int nixlf)
1577 {
1578 	int pf = rvu_get_pf(pcifunc);
1579 	u8 cgx_id = 0, lmac_id = 0;
1580 	int err, restore_tx_en = 0;
1581 	u64 cfg;
1582 
1583 	/* enable cgx tx if disabled */
1584 	if (is_pf_cgxmapped(rvu, pf)) {
1585 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1586 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1587 						    lmac_id, true);
1588 	}
1589 
1590 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1591 	/* Do SMQ flush and set enqueue xoff */
1592 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1593 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1594 
1595 	/* Disable backpressure from physical link,
1596 	 * otherwise SMQ flush may stall.
1597 	 */
1598 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1599 
1600 	/* Wait for flush to complete */
1601 	err = rvu_poll_reg(rvu, blkaddr,
1602 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1603 	if (err)
1604 		dev_err(rvu->dev,
1605 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1606 
1607 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1608 	/* restore cgx tx state */
1609 	if (restore_tx_en)
1610 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1611 }
1612 
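/* Free all transmit scheduler queues owned by this PF_FUNC: reset
 * their link config, flush SMQs, return the queues to the free pool
 * and then sync NDC-TX cached info for the LF.
 */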
1613 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1614 {
1615 	int blkaddr, nixlf, lvl, schq, err;
1616 	struct rvu_hwinfo *hw = rvu->hw;
1617 	struct nix_txsch *txsch;
1618 	struct nix_hw *nix_hw;
1619 
1620 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1621 	if (blkaddr < 0)
1622 		return NIX_AF_ERR_AF_LF_INVALID;
1623 
1624 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1625 	if (!nix_hw)
1626 		return -EINVAL;
1627 
1628 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1629 	if (nixlf < 0)
1630 		return NIX_AF_ERR_AF_LF_INVALID;
1631 
1632 	/* Disable TL2/3 queue links before SMQ flush */
1633 	mutex_lock(&rvu->rsrc_lock);
1634 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1635 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1636 			continue;
1637 
1638 		txsch = &nix_hw->txsch[lvl];
1639 		for (schq = 0; schq < txsch->schq.max; schq++) {
1640 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1641 				continue;
1642 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1643 		}
1644 	}
1645 
1646 	/* Flush SMQs */
1647 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1648 	for (schq = 0; schq < txsch->schq.max; schq++) {
1649 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1650 			continue;
1651 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1652 	}
1653 
1654 	/* Now free scheduler queues to free pool */
1655 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1656 		 /* TLs above the aggregation level are shared across a PF
1657 		  * and all its VFs, hence skip freeing them.
1658 		  */
1659 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1660 			continue;
1661 
1662 		txsch = &nix_hw->txsch[lvl];
1663 		for (schq = 0; schq < txsch->schq.max; schq++) {
1664 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1665 				continue;
1666 			rvu_free_rsrc(&txsch->schq, schq);
1667 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1668 		}
1669 	}
1670 	mutex_unlock(&rvu->rsrc_lock);
1671 
1672 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1673 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1674 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1675 	if (err)
1676 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1677 
1678 	return 0;
1679 }
1680 
1681 static int nix_txschq_free_one(struct rvu *rvu,
1682 			       struct nix_txsch_free_req *req)
1683 {
1684 	struct rvu_hwinfo *hw = rvu->hw;
1685 	u16 pcifunc = req->hdr.pcifunc;
1686 	int lvl, schq, nixlf, blkaddr;
1687 	struct nix_txsch *txsch;
1688 	struct nix_hw *nix_hw;
1689 	u32 *pfvf_map;
1690 
1691 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1692 	if (blkaddr < 0)
1693 		return NIX_AF_ERR_AF_LF_INVALID;
1694 
1695 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1696 	if (!nix_hw)
1697 		return -EINVAL;
1698 
1699 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1700 	if (nixlf < 0)
1701 		return NIX_AF_ERR_AF_LF_INVALID;
1702 
1703 	lvl = req->schq_lvl;
1704 	schq = req->schq;
1705 	txsch = &nix_hw->txsch[lvl];
1706 
1707 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1708 		return 0;
1709 
1710 	pfvf_map = txsch->pfvf_map;
1711 	mutex_lock(&rvu->rsrc_lock);
1712 
1713 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1714 		mutex_unlock(&rvu->rsrc_lock);
1715 		goto err;
1716 	}
1717 
1718 	/* Flush if it is an SMQ. The onus of disabling
1719 	 * TL2/3 queue links before the SMQ flush is on the user.
1720 	 */
1721 	if (lvl == NIX_TXSCH_LVL_SMQ)
1722 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1723 
1724 	/* Free the resource */
1725 	rvu_free_rsrc(&txsch->schq, schq);
1726 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1727 	mutex_unlock(&rvu->rsrc_lock);
1728 	return 0;
1729 err:
1730 	return NIX_AF_ERR_TLX_INVALID;
1731 }
1732 
1733 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1734 				    struct nix_txsch_free_req *req,
1735 				    struct msg_rsp *rsp)
1736 {
1737 	if (req->flags & TXSCHQ_FREE_ALL)
1738 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1739 	else
1740 		return nix_txschq_free_one(rvu, req);
1741 }
1742 
1743 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1744 				      int lvl, u64 reg, u64 regval)
1745 {
1746 	u64 regbase = reg & 0xFFFF;
1747 	u16 schq, parent;
1748 
1749 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1750 		return false;
1751 
1752 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1753 	/* Check if this schq belongs to this PF/VF or not */
1754 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1755 		return false;
1756 
1757 	parent = (regval >> 16) & 0x1FF;
1758 	/* Validate MDQ's TL4 parent */
1759 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1760 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1761 		return false;
1762 
1763 	/* Validate TL4's TL3 parent */
1764 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1765 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1766 		return false;
1767 
1768 	/* Validate TL3's TL2 parent */
1769 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1770 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1771 		return false;
1772 
1773 	/* Validate TL2's TL1 parent */
1774 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1775 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1776 		return false;
1777 
1778 	return true;
1779 }
1780 
1781 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1782 {
1783 	u64 regbase;
1784 
1785 	if (hw->cap.nix_shaping)
1786 		return true;
1787 
1788 	/* If shaping and coloring is not supported, then
1789 	 * *_CIR and *_PIR registers should not be configured.
1790 	 */
1791 	regbase = reg & 0xFFFF;
1792 
1793 	switch (lvl) {
1794 	case NIX_TXSCH_LVL_TL1:
1795 		if (regbase == NIX_AF_TL1X_CIR(0))
1796 			return false;
1797 		break;
1798 	case NIX_TXSCH_LVL_TL2:
1799 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1800 		    regbase == NIX_AF_TL2X_PIR(0))
1801 			return false;
1802 		break;
1803 	case NIX_TXSCH_LVL_TL3:
1804 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1805 		    regbase == NIX_AF_TL3X_PIR(0))
1806 			return false;
1807 		break;
1808 	case NIX_TXSCH_LVL_TL4:
1809 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1810 		    regbase == NIX_AF_TL4X_PIR(0))
1811 			return false;
1812 		break;
1813 	}
1814 	return true;
1815 }
1816 
1817 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1818 				u16 pcifunc, int blkaddr)
1819 {
1820 	u32 *pfvf_map;
1821 	int schq;
1822 
1823 	schq = nix_get_tx_link(rvu, pcifunc);
1824 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1825 	/* Skip if PF has already done the config */
1826 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1827 		return;
1828 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1829 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1830 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1831 		    TXSCH_TL1_DFLT_RR_QTM);
1832 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1833 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1834 }
1835 
1836 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1837 				    struct nix_txschq_config *req,
1838 				    struct msg_rsp *rsp)
1839 {
1840 	struct rvu_hwinfo *hw = rvu->hw;
1841 	u16 pcifunc = req->hdr.pcifunc;
1842 	u64 reg, regval, schq_regbase;
1843 	struct nix_txsch *txsch;
1844 	struct nix_hw *nix_hw;
1845 	int blkaddr, idx, err;
1846 	int nixlf, schq;
1847 	u32 *pfvf_map;
1848 
1849 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1850 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1851 		return NIX_AF_INVAL_TXSCHQ_CFG;
1852 
1853 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1854 	if (err)
1855 		return err;
1856 
1857 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1858 	if (!nix_hw)
1859 		return -EINVAL;
1860 
1861 	txsch = &nix_hw->txsch[req->lvl];
1862 	pfvf_map = txsch->pfvf_map;
1863 
1864 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1865 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1866 		mutex_lock(&rvu->rsrc_lock);
1867 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1868 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1869 		mutex_unlock(&rvu->rsrc_lock);
1870 		return 0;
1871 	}
1872 
1873 	for (idx = 0; idx < req->num_regs; idx++) {
1874 		reg = req->reg[idx];
1875 		regval = req->regval[idx];
1876 		schq_regbase = reg & 0xFFFF;
1877 
1878 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1879 					       txsch->lvl, reg, regval))
1880 			return NIX_AF_INVAL_TXSCHQ_CFG;
1881 
1882 		/* Check if shaping and coloring is supported */
1883 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1884 			continue;
1885 
1886 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1887 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1888 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1889 					   pcifunc, 0);
1890 			regval &= ~(0x7FULL << 24);
1891 			regval |= ((u64)nixlf << 24);
1892 		}
1893 
1894 		/* Clear 'BP_ENA' config, if it's not allowed */
1895 		if (!hw->cap.nix_tx_link_bp) {
1896 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1897 			    (schq_regbase & 0xFF00) ==
1898 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1899 				regval &= ~BIT_ULL(13);
1900 		}
1901 
1902 		/* Mark config as done for TL1 by PF */
1903 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1904 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1905 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1906 			mutex_lock(&rvu->rsrc_lock);
1907 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1908 							NIX_TXSCHQ_CFG_DONE);
1909 			mutex_unlock(&rvu->rsrc_lock);
1910 		}
1911 
1912 		/* SMQ flush is special, hence split the register write:
1913 		 * trigger the flush first and write the rest of the bits later.
1914 		 */
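		/* Here BIT(49) of NIX_AF_SMQ(x)_CFG acts as the flush trigger
		 * (the same bit nix_smq_flush() polls on), so it is handled
		 * via nix_smq_flush() and masked out of the plain write.
		 */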
1915 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1916 		    (regval & BIT_ULL(49))) {
1917 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1918 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1919 			regval &= ~BIT_ULL(49);
1920 		}
1921 		rvu_write64(rvu, blkaddr, reg, regval);
1922 	}
1923 
1924 	return 0;
1925 }
1926 
1927 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1928 			   struct nix_vtag_config *req)
1929 {
1930 	u64 regval = req->vtag_size;
1931 
1932 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1933 		return -EINVAL;
1934 
1935 	if (req->rx.capture_vtag)
1936 		regval |= BIT_ULL(5);
1937 	if (req->rx.strip_vtag)
1938 		regval |= BIT_ULL(4);
1939 
1940 	rvu_write64(rvu, blkaddr,
1941 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1942 	return 0;
1943 }
1944 
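/* Illustrative request flow (a sketch based on the checks above, not on the
 * spec): a PF/VF asking for RX VLAN offload sends a nix_vtag_config with a
 * non-zero cfg_type (RX), vtag_size <= VTAGSIZE_T8, rx.vtag_type in 0..7
 * selecting one of the per-LF NIX_AF_LF(x)_RX_VTAG_TYPE(y) slots, and
 * rx.strip_vtag/rx.capture_vtag set as needed; nix_rx_vtag_cfg() above then
 * maps strip/capture to bits 4/5 of that register.
 */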
1945 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1946 				  struct nix_vtag_config *req,
1947 				  struct msg_rsp *rsp)
1948 {
1949 	u16 pcifunc = req->hdr.pcifunc;
1950 	int blkaddr, nixlf, err;
1951 
1952 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1953 	if (err)
1954 		return err;
1955 
1956 	if (req->cfg_type) {
1957 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1958 		if (err)
1959 			return NIX_AF_ERR_PARAM;
1960 	} else {
1961 		/* TODO: handle tx vtag configuration */
1962 		return 0;
1963 	}
1964 
1965 	return 0;
1966 }
1967 
1968 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1969 			 u16 pcifunc, int next, bool eol)
1970 {
1971 	struct nix_aq_enq_req aq_req;
1972 	int err;
1973 
1974 	aq_req.hdr.pcifunc = 0;
1975 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1976 	aq_req.op = op;
1977 	aq_req.qidx = mce;
1978 
1979 	/* Forward bcast pkts to RQ0, RSS not needed */
1980 	aq_req.mce.op = 0;
1981 	aq_req.mce.index = 0;
1982 	aq_req.mce.eol = eol;
1983 	aq_req.mce.pf_func = pcifunc;
1984 	aq_req.mce.next = next;
1985 
1986 	/* All fields valid */
1987 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1988 
1989 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1990 	if (err) {
1991 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1992 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1993 		return err;
1994 	}
1995 	return 0;
1996 }
1997 
1998 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1999 			       u16 pcifunc, bool add)
2000 {
2001 	struct mce *mce, *tail = NULL;
2002 	bool delete = false;
2003 
2004 	/* Scan through the current list */
2005 	hlist_for_each_entry(mce, &mce_list->head, node) {
2006 		/* If already exists, then delete */
2007 		if (mce->pcifunc == pcifunc && !add) {
2008 			delete = true;
2009 			break;
2010 		}
2011 		tail = mce;
2012 	}
2013 
2014 	if (delete) {
2015 		hlist_del(&mce->node);
2016 		kfree(mce);
2017 		mce_list->count--;
2018 		return 0;
2019 	}
2020 
2021 	if (!add)
2022 		return 0;
2023 
2024 	/* Add a new one to the list, at the tail */
2025 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2026 	if (!mce)
2027 		return -ENOMEM;
2028 	mce->pcifunc = pcifunc;
2029 	if (!tail)
2030 		hlist_add_head(&mce->node, &mce_list->head);
2031 	else
2032 		hlist_add_behind(&mce->node, &tail->node);
2033 	mce_list->count++;
2034 	return 0;
2035 }
2036 
2037 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2038 {
2039 	int err = 0, idx, next_idx, last_idx;
2040 	struct nix_mce_list *mce_list;
2041 	struct nix_mcast *mcast;
2042 	struct nix_hw *nix_hw;
2043 	struct rvu_pfvf *pfvf;
2044 	struct mce *mce;
2045 	int blkaddr;
2046 
2047 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2048 	if (is_afvf(pcifunc))
2049 		return 0;
2050 
2051 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2052 	if (blkaddr < 0)
2053 		return 0;
2054 
2055 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2056 	if (!nix_hw)
2057 		return 0;
2058 
2059 	mcast = &nix_hw->mcast;
2060 
2061 	/* Get this PF/VF func's MCE index */
2062 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2063 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2064 
2065 	mce_list = &pfvf->bcast_mce_list;
2066 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2067 		dev_err(rvu->dev,
2068 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2069 			__func__, idx, mce_list->max,
2070 			pcifunc >> RVU_PFVF_PF_SHIFT);
2071 		return -EINVAL;
2072 	}
2073 
2074 	mutex_lock(&mcast->mce_lock);
2075 
2076 	err = nix_update_mce_list(mce_list, pcifunc, add);
2077 	if (err)
2078 		goto end;
2079 
2080 	/* Disable MCAM entry in NPC */
2081 	if (!mce_list->count) {
2082 		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2083 		goto end;
2084 	}
2085 
2086 	/* Dump the updated list to HW */
2087 	idx = pfvf->bcast_mce_idx;
2088 	last_idx = idx + mce_list->count - 1;
2089 	hlist_for_each_entry(mce, &mce_list->head, node) {
2090 		if (idx > last_idx)
2091 			break;
2092 
2093 		next_idx = idx + 1;
2094 		/* EOL should be set in last MCE */
2095 		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
2096 				    mce->pcifunc, next_idx,
2097 				    next_idx > last_idx);
2098 		if (err)
2099 			goto end;
2100 		idx++;
2101 	}
2102 
2103 end:
2104 	mutex_unlock(&mcast->mce_lock);
2105 	return err;
2106 }
2107 
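/* Layout sketch of the bcast MCE chain built below and programmed into HW by
 * nix_update_bcast_mce_list(): each PF owns (numvfs + 1) consecutive MCEs
 * starting at pfvf->bcast_mce_idx, e.g. a PF with 3 VFs owns entries
 * idx..idx+3; when written out, entry i points to i+1 and only the last
 * in-use entry has EOL set, so a bcast pkt is replicated once per attached
 * PF/VF NIXLF (to RQ0 of each, RSS not used).
 */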
2108 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2109 {
2110 	struct nix_mcast *mcast = &nix_hw->mcast;
2111 	int err, pf, numvfs, idx;
2112 	struct rvu_pfvf *pfvf;
2113 	u16 pcifunc;
2114 	u64 cfg;
2115 
2116 	/* Skip PF0 (i.e. AF) */
2117 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2118 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2119 		/* If PF is not enabled, nothing to do */
2120 		if (!((cfg >> 20) & 0x01))
2121 			continue;
2122 		/* Get numVFs attached to this PF */
2123 		numvfs = (cfg >> 12) & 0xFF;
2124 
2125 		pfvf = &rvu->pf[pf];
2126 		/* Save the start MCE */
2127 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2128 
2129 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2130 
2131 		for (idx = 0; idx < (numvfs + 1); idx++) {
2132 			/* idx-0 is for PF, followed by VFs */
2133 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2134 			pcifunc |= idx;
2135 			/* Add dummy entries now, so that we don't have to check
2136 			 * whether AQ_OP should be INIT/WRITE later on.
2137 			 * They will be updated when a NIXLF is attached to or
2138 			 * detached from these PF/VFs.
2139 			 */
2140 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
2141 					    NIX_AQ_INSTOP_INIT,
2142 					    pcifunc, 0, true);
2143 			if (err)
2144 				return err;
2145 		}
2146 	}
2147 	return 0;
2148 }
2149 
2150 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2151 {
2152 	struct nix_mcast *mcast = &nix_hw->mcast;
2153 	struct rvu_hwinfo *hw = rvu->hw;
2154 	int err, size;
2155 
2156 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2157 	size = (1ULL << size);
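	/* The value read above from NIX_AF_CONST3[19:16] is used as log2 of
	 * the MCE context size. The MC_TBL_SIZE/MC_BUF_CNT enums below are
	 * likewise used as power-of-two exponents, i.e. 256 << MC_TBL_SIZE
	 * context entries and 8 << MC_BUF_CNT replication buffers.
	 */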
2158 
2159 	/* Alloc memory for multicast/mirror replication entries */
2160 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2161 			 (256UL << MC_TBL_SIZE), size);
2162 	if (err)
2163 		return -ENOMEM;
2164 
2165 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2166 		    (u64)mcast->mce_ctx->iova);
2167 
2168 	/* Set max list length equal to max no of VFs per PF + PF itself */
2169 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2170 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2171 
2172 	/* Alloc memory for multicast replication buffers */
2173 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2174 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2175 			 (8UL << MC_BUF_CNT), size);
2176 	if (err)
2177 		return -ENOMEM;
2178 
2179 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2180 		    (u64)mcast->mcast_buf->iova);
2181 
2182 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2183 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2184 
2185 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2186 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2187 		    BIT_ULL(20) | MC_BUF_CNT);
2188 
2189 	mutex_init(&mcast->mce_lock);
2190 
2191 	return nix_setup_bcast_tables(rvu, nix_hw);
2192 }
2193 
2194 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2195 {
2196 	struct nix_txsch *txsch;
2197 	int err, lvl, schq;
2198 	u64 cfg, reg;
2199 
2200 	/* Get scheduler queue count of each type and alloc
2201 	 * bitmap for each for alloc/free/attach operations.
2202 	 */
2203 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2204 		txsch = &nix_hw->txsch[lvl];
2205 		txsch->lvl = lvl;
2206 		switch (lvl) {
2207 		case NIX_TXSCH_LVL_SMQ:
2208 			reg = NIX_AF_MDQ_CONST;
2209 			break;
2210 		case NIX_TXSCH_LVL_TL4:
2211 			reg = NIX_AF_TL4_CONST;
2212 			break;
2213 		case NIX_TXSCH_LVL_TL3:
2214 			reg = NIX_AF_TL3_CONST;
2215 			break;
2216 		case NIX_TXSCH_LVL_TL2:
2217 			reg = NIX_AF_TL2_CONST;
2218 			break;
2219 		case NIX_TXSCH_LVL_TL1:
2220 			reg = NIX_AF_TL1_CONST;
2221 			break;
2222 		}
2223 		cfg = rvu_read64(rvu, blkaddr, reg);
2224 		txsch->schq.max = cfg & 0xFFFF;
2225 		err = rvu_alloc_bitmap(&txsch->schq);
2226 		if (err)
2227 			return err;
2228 
2229 		/* Allocate memory for scheduler queues to
2230 		 * PF/VF pcifunc mapping info.
2231 		 */
2232 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2233 					       sizeof(u32), GFP_KERNEL);
2234 		if (!txsch->pfvf_map)
2235 			return -ENOMEM;
2236 		for (schq = 0; schq < txsch->schq.max; schq++)
2237 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2238 	}
2239 	return 0;
2240 }
2241 
2242 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2243 				int blkaddr, u32 cfg)
2244 {
2245 	int fmt_idx;
2246 
2247 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2248 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2249 			return fmt_idx;
2250 	}
2251 	if (fmt_idx >= nix_hw->mark_format.total)
2252 		return -ERANGE;
2253 
2254 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2255 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2256 	nix_hw->mark_format.in_use++;
2257 	return fmt_idx;
2258 }
2259 
2260 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2261 				    int blkaddr)
2262 {
2263 	u64 cfgs[] = {
2264 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2265 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2266 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2267 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2268 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2269 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2270 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2271 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2272 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2273 	};
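	/* Reading of the constants above (not verified against the HRM):
	 * each *_YELLOW_RED value is simply the bitwise OR of the matching
	 * *_YELLOW and *_RED values, with the yellow action encoded in
	 * bits [15:8] and the red action in bits [7:0] of the
	 * NIX_AF_MARK_FORMATX_CTL value.
	 */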
2274 	int i, rc;
2275 	u64 total;
2276 
2277 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2278 	nix_hw->mark_format.total = (u8)total;
2279 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2280 					       GFP_KERNEL);
2281 	if (!nix_hw->mark_format.cfg)
2282 		return -ENOMEM;
2283 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2284 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2285 		if (rc < 0)
2286 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2287 				i, rc);
2288 	}
2289 
2290 	return 0;
2291 }
2292 
2293 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2294 				   struct msg_rsp *rsp)
2295 {
2296 	u16 pcifunc = req->hdr.pcifunc;
2297 	int i, nixlf, blkaddr, err;
2298 	u64 stats;
2299 
2300 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2301 	if (err)
2302 		return err;
2303 
2304 	/* Get stats count supported by HW */
2305 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2306 
2307 	/* Reset tx stats */
2308 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2309 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2310 
2311 	/* Reset rx stats */
2312 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2313 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2314 
2315 	return 0;
2316 }
2317 
2318 /* Returns the ALG index to be set into NPC_RX_ACTION */
2319 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2320 {
2321 	int i;
2322 
2323 	/* Scan over existing algo entries to find a match */
2324 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2325 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2326 			return i;
2327 
2328 	return -ERANGE;
2329 }
2330 
2331 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2332 {
2333 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2334 	int max_key_off, max_bit_pos, group_member;
2335 	struct nix_rx_flowkey_alg *field;
2336 	struct nix_rx_flowkey_alg tmp;
2337 	u32 key_type, valid_key;
2338 
2339 	if (!alg)
2340 		return -EINVAL;
2341 
2342 #define FIELDS_PER_ALG  5
2343 #define MAX_KEY_OFF	40
2344 	/* Clear all fields */
2345 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2346 
2347 	/* Each of the 32 possible flow key algorithm definitions should
2348 	 * fall into the above incremental config (except ALG0). Otherwise a
2349 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2350 	 *
2351 	 * If a different definition or combination is needed then the NPC MCAM
2352 	 * has to be programmed to filter such pkts and its action should
2353 	 * point to this definition to calculate flowtag or hash.
2354 	 *
2355 	 * The `for loop` goes over _all_ protocol fields and the following
2356 	 * variables depict the state machine forward progress logic.
2357 	 *
2358 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2359 	 * in field->key_offset update.
2360 	 * field_marker - Enabled when a new field needs to be selected.
2361 	 * group_member - Enabled when protocol is part of a group.
2362 	 */
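	/* Worked example (illustrative, traced from the loop below): with
	 * flow_cfg = IPV4 | IPV6 | TCP this produces field[0] = outer IPv4
	 * SIP+DIP (8 bytes) at key offset 0, field[1] = outer IPv6 SIP+DIP
	 * (32 bytes) also at offset 0 (a packet can match only one of the
	 * two), and field[2] = TCP sport+dport (4 bytes) at offset 32, using
	 * 36 of the MAX_KEY_OFF (40) bytes of the RSS hash key input.
	 */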
2363 
2364 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2365 	nr_field = 0; key_off = 0; field_marker = 1;
2366 	field = &tmp; max_bit_pos = fls(flow_cfg);
2367 	for (idx = 0;
2368 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2369 	     key_off < MAX_KEY_OFF; idx++) {
2370 		key_type = BIT(idx);
2371 		valid_key = flow_cfg & key_type;
2372 		/* Found a field marker, reset the field values */
2373 		if (field_marker)
2374 			memset(&tmp, 0, sizeof(tmp));
2375 
2376 		field_marker = true;
2377 		keyoff_marker = true;
2378 		switch (key_type) {
2379 		case NIX_FLOW_KEY_TYPE_PORT:
2380 			field->sel_chan = true;
2381 			/* This should be set to 1, when SEL_CHAN is set */
2382 			field->bytesm1 = 1;
2383 			break;
2384 		case NIX_FLOW_KEY_TYPE_IPV4:
2385 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2386 			field->lid = NPC_LID_LC;
2387 			field->ltype_match = NPC_LT_LC_IP;
2388 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2389 				field->lid = NPC_LID_LG;
2390 				field->ltype_match = NPC_LT_LG_TU_IP;
2391 			}
2392 			field->hdr_offset = 12; /* SIP offset */
2393 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2394 			field->ltype_mask = 0xF; /* Match only IPv4 */
2395 			keyoff_marker = false;
2396 			break;
2397 		case NIX_FLOW_KEY_TYPE_IPV6:
2398 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2399 			field->lid = NPC_LID_LC;
2400 			field->ltype_match = NPC_LT_LC_IP6;
2401 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2402 				field->lid = NPC_LID_LG;
2403 				field->ltype_match = NPC_LT_LG_TU_IP6;
2404 			}
2405 			field->hdr_offset = 8; /* SIP offset */
2406 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2407 			field->ltype_mask = 0xF; /* Match only IPv6 */
2408 			break;
2409 		case NIX_FLOW_KEY_TYPE_TCP:
2410 		case NIX_FLOW_KEY_TYPE_UDP:
2411 		case NIX_FLOW_KEY_TYPE_SCTP:
2412 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2413 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2414 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2415 			field->lid = NPC_LID_LD;
2416 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2417 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2418 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2419 				field->lid = NPC_LID_LH;
2420 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2421 
2422 			/* Enum values for NPC_LID_LD and NPC_LID_LH are the same,
2423 			 * so no need to change the ltype_match, just change
2424 			 * the lid for inner protocols
2425 			 */
2426 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2427 				     (int)NPC_LT_LH_TU_TCP);
2428 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2429 				     (int)NPC_LT_LH_TU_UDP);
2430 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2431 				     (int)NPC_LT_LH_TU_SCTP);
2432 
2433 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2434 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2435 			    valid_key) {
2436 				field->ltype_match |= NPC_LT_LD_TCP;
2437 				group_member = true;
2438 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2439 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2440 				   valid_key) {
2441 				field->ltype_match |= NPC_LT_LD_UDP;
2442 				group_member = true;
2443 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2444 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2445 				   valid_key) {
2446 				field->ltype_match |= NPC_LT_LD_SCTP;
2447 				group_member = true;
2448 			}
2449 			field->ltype_mask = ~field->ltype_match;
2450 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2451 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2452 				/* Handle the case where any of the group items
2453 				 * is enabled in the group but not the final one
2454 				 */
2455 				if (group_member) {
2456 					valid_key = true;
2457 					group_member = false;
2458 				}
2459 			} else {
2460 				field_marker = false;
2461 				keyoff_marker = false;
2462 			}
2463 			break;
2464 		case NIX_FLOW_KEY_TYPE_NVGRE:
2465 			field->lid = NPC_LID_LD;
2466 			field->hdr_offset = 4; /* VSID offset */
2467 			field->bytesm1 = 2;
2468 			field->ltype_match = NPC_LT_LD_NVGRE;
2469 			field->ltype_mask = 0xF;
2470 			break;
2471 		case NIX_FLOW_KEY_TYPE_VXLAN:
2472 		case NIX_FLOW_KEY_TYPE_GENEVE:
2473 			field->lid = NPC_LID_LE;
2474 			field->bytesm1 = 2;
2475 			field->hdr_offset = 4;
2476 			field->ltype_mask = 0xF;
2477 			field_marker = false;
2478 			keyoff_marker = false;
2479 
2480 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2481 				field->ltype_match |= NPC_LT_LE_VXLAN;
2482 				group_member = true;
2483 			}
2484 
2485 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2486 				field->ltype_match |= NPC_LT_LE_GENEVE;
2487 				group_member = true;
2488 			}
2489 
2490 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2491 				if (group_member) {
2492 					field->ltype_mask = ~field->ltype_match;
2493 					field_marker = true;
2494 					keyoff_marker = true;
2495 					valid_key = true;
2496 					group_member = false;
2497 				}
2498 			}
2499 			break;
2500 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2501 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2502 			field->lid = NPC_LID_LA;
2503 			field->ltype_match = NPC_LT_LA_ETHER;
2504 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2505 				field->lid = NPC_LID_LF;
2506 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2507 			}
2508 			field->hdr_offset = 0;
2509 			field->bytesm1 = 5; /* DMAC 6 Byte */
2510 			field->ltype_mask = 0xF;
2511 			break;
2512 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2513 			field->lid = NPC_LID_LC;
2514 			field->hdr_offset = 40; /* IPV6 hdr */
2515 			field->bytesm1 = 0; /* 1 Byte ext hdr */
2516 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2517 			field->ltype_mask = 0xF;
2518 			break;
2519 		case NIX_FLOW_KEY_TYPE_GTPU:
2520 			field->lid = NPC_LID_LE;
2521 			field->hdr_offset = 4;
2522 			field->bytesm1 = 3; /* 4 bytes TID */
2523 			field->ltype_match = NPC_LT_LE_GTPU;
2524 			field->ltype_mask = 0xF;
2525 			break;
2526 		case NIX_FLOW_KEY_TYPE_VLAN:
2527 			field->lid = NPC_LID_LB;
2528 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2529 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2530 			field->ltype_match = NPC_LT_LB_CTAG;
2531 			field->ltype_mask = 0xF;
2532 			field->fn_mask = 1; /* Mask out the first nibble */
2533 			break;
2534 		}
2535 		field->ena = 1;
2536 
2537 		/* Found a valid flow key type */
2538 		if (valid_key) {
2539 			field->key_offset = key_off;
2540 			memcpy(&alg[nr_field], field, sizeof(*field));
2541 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2542 
2543 			/* Found a field marker, get the next field */
2544 			if (field_marker)
2545 				nr_field++;
2546 		}
2547 
2548 		/* Found a keyoff marker, update the new key_off */
2549 		if (keyoff_marker) {
2550 			key_off += max_key_off;
2551 			max_key_off = 0;
2552 		}
2553 	}
2554 	/* Processed all the flow key types */
2555 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2556 		return 0;
2557 	else
2558 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2559 }
2560 
2561 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2562 {
2563 	u64 field[FIELDS_PER_ALG];
2564 	struct nix_hw *hw;
2565 	int fid, rc;
2566 
2567 	hw = get_nix_hw(rvu->hw, blkaddr);
2568 	if (!hw)
2569 		return -EINVAL;
2570 
2571 	/* No room to add a new flow hash algorithm */
2572 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2573 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
2574 
2575 	/* Generate algo fields for the given flow_cfg */
2576 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2577 	if (rc)
2578 		return rc;
2579 
2580 	/* Update ALGX_FIELDX register with generated fields */
2581 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2582 		rvu_write64(rvu, blkaddr,
2583 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2584 							   fid), field[fid]);
2585 
2586 	/* Store the flow_cfg for further lookup */
2587 	rc = hw->flowkey.in_use;
2588 	hw->flowkey.flowkey[rc] = flow_cfg;
2589 	hw->flowkey.in_use++;
2590 
2591 	return rc;
2592 }
2593 
2594 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2595 					 struct nix_rss_flowkey_cfg *req,
2596 					 struct nix_rss_flowkey_cfg_rsp *rsp)
2597 {
2598 	u16 pcifunc = req->hdr.pcifunc;
2599 	int alg_idx, nixlf, blkaddr;
2600 	struct nix_hw *nix_hw;
2601 	int err;
2602 
2603 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2604 	if (err)
2605 		return err;
2606 
2607 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2608 	if (!nix_hw)
2609 		return -EINVAL;
2610 
2611 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2612 	/* Failed to get algo index from the existing list, reserve a new one */
2613 	if (alg_idx < 0) {
2614 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2615 						  req->flowkey_cfg);
2616 		if (alg_idx < 0)
2617 			return alg_idx;
2618 	}
2619 	rsp->alg_idx = alg_idx;
2620 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2621 				       alg_idx, req->mcam_index);
2622 	return 0;
2623 }
2624 
2625 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2626 {
2627 	u32 flowkey_cfg, minkey_cfg;
2628 	int alg, fid, rc;
2629 
2630 	/* Disable all flow key algx fieldx */
2631 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2632 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2633 			rvu_write64(rvu, blkaddr,
2634 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2635 				    0);
2636 	}
2637 
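	/* The default configs below are reserved back to back, so (since
	 * flowkey.in_use starts at zero) they land at consecutive ALG indices
	 * starting from 0: the 2-tuple first, then the TCP/UDP/SCTP 4-tuple
	 * combinations in the order listed. Later PF/VF requests with a
	 * matching flowkey_cfg reuse them via get_flowkey_alg_idx().
	 */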
2638 	/* IPv4/IPv6 SIP/DIPs */
2639 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2640 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2641 	if (rc < 0)
2642 		return rc;
2643 
2644 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2645 	minkey_cfg = flowkey_cfg;
2646 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2647 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2648 	if (rc < 0)
2649 		return rc;
2650 
2651 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2652 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2653 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2654 	if (rc < 0)
2655 		return rc;
2656 
2657 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2658 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2659 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2660 	if (rc < 0)
2661 		return rc;
2662 
2663 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2664 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2665 			NIX_FLOW_KEY_TYPE_UDP;
2666 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2667 	if (rc < 0)
2668 		return rc;
2669 
2670 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2671 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2672 			NIX_FLOW_KEY_TYPE_SCTP;
2673 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2674 	if (rc < 0)
2675 		return rc;
2676 
2677 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2678 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2679 			NIX_FLOW_KEY_TYPE_SCTP;
2680 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2681 	if (rc < 0)
2682 		return rc;
2683 
2684 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2685 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2686 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2687 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2688 	if (rc < 0)
2689 		return rc;
2690 
2691 	return 0;
2692 }
2693 
2694 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2695 				      struct nix_set_mac_addr *req,
2696 				      struct msg_rsp *rsp)
2697 {
2698 	u16 pcifunc = req->hdr.pcifunc;
2699 	int blkaddr, nixlf, err;
2700 	struct rvu_pfvf *pfvf;
2701 
2702 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2703 	if (err)
2704 		return err;
2705 
2706 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2707 
2708 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2709 
2710 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2711 				    pfvf->rx_chan_base, req->mac_addr);
2712 
2713 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2714 
2715 	return 0;
2716 }
2717 
2718 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2719 				      struct msg_req *req,
2720 				      struct nix_get_mac_addr_rsp *rsp)
2721 {
2722 	u16 pcifunc = req->hdr.pcifunc;
2723 	struct rvu_pfvf *pfvf;
2724 
2725 	if (!is_nixlf_attached(rvu, pcifunc))
2726 		return NIX_AF_ERR_AF_LF_INVALID;
2727 
2728 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2729 
2730 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2731 
2732 	return 0;
2733 }
2734 
2735 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2736 				     struct msg_rsp *rsp)
2737 {
2738 	bool allmulti = false, disable_promisc = false;
2739 	u16 pcifunc = req->hdr.pcifunc;
2740 	int blkaddr, nixlf, err;
2741 	struct rvu_pfvf *pfvf;
2742 
2743 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2744 	if (err)
2745 		return err;
2746 
2747 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2748 
2749 	if (req->mode & NIX_RX_MODE_PROMISC)
2750 		allmulti = false;
2751 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
2752 		allmulti = true;
2753 	else
2754 		disable_promisc = true;
2755 
2756 	if (disable_promisc)
2757 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2758 	else
2759 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2760 					      pfvf->rx_chan_base, allmulti);
2761 
2762 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2763 
2764 	return 0;
2765 }
2766 
2767 static void nix_find_link_frs(struct rvu *rvu,
2768 			      struct nix_frs_cfg *req, u16 pcifunc)
2769 {
2770 	int pf = rvu_get_pf(pcifunc);
2771 	struct rvu_pfvf *pfvf;
2772 	int maxlen, minlen;
2773 	int numvfs, hwvf;
2774 	int vf;
2775 
2776 	/* Update with requester's min/max lengths */
2777 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2778 	pfvf->maxlen = req->maxlen;
2779 	if (req->update_minlen)
2780 		pfvf->minlen = req->minlen;
2781 
2782 	maxlen = req->maxlen;
2783 	minlen = req->update_minlen ? req->minlen : 0;
2784 
2785 	/* Get this PF's numVFs and starting hwvf */
2786 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2787 
2788 	/* For each VF, compare requested max/minlen */
2789 	for (vf = 0; vf < numvfs; vf++) {
2790 		pfvf =  &rvu->hwvf[hwvf + vf];
2791 		if (pfvf->maxlen > maxlen)
2792 			maxlen = pfvf->maxlen;
2793 		if (req->update_minlen &&
2794 		    pfvf->minlen && pfvf->minlen < minlen)
2795 			minlen = pfvf->minlen;
2796 	}
2797 
2798 	/* Compare requested max/minlen with PF's max/minlen */
2799 	pfvf = &rvu->pf[pf];
2800 	if (pfvf->maxlen > maxlen)
2801 		maxlen = pfvf->maxlen;
2802 	if (req->update_minlen &&
2803 	    pfvf->minlen && pfvf->minlen < minlen)
2804 		minlen = pfvf->minlen;
2805 
2806 	/* Update the request with the max/min across the PF and its VFs */
2807 	req->maxlen = maxlen;
2808 	if (req->update_minlen)
2809 		req->minlen = minlen;
2810 }
2811 
2812 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2813 				    struct msg_rsp *rsp)
2814 {
2815 	struct rvu_hwinfo *hw = rvu->hw;
2816 	u16 pcifunc = req->hdr.pcifunc;
2817 	int pf = rvu_get_pf(pcifunc);
2818 	int blkaddr, schq, link = -1;
2819 	struct nix_txsch *txsch;
2820 	u64 cfg, lmac_fifo_len;
2821 	struct nix_hw *nix_hw;
2822 	u8 cgx = 0, lmac = 0;
2823 
2824 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2825 	if (blkaddr < 0)
2826 		return NIX_AF_ERR_AF_LF_INVALID;
2827 
2828 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2829 	if (!nix_hw)
2830 		return -EINVAL;
2831 
2832 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2833 		return NIX_AF_ERR_FRS_INVALID;
2834 
2835 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2836 		return NIX_AF_ERR_FRS_INVALID;
2837 
2838 	/* Check if requester wants to update SMQ's */
2839 	if (!req->update_smq)
2840 		goto rx_frscfg;
2841 
2842 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
2843 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2844 	mutex_lock(&rvu->rsrc_lock);
2845 	for (schq = 0; schq < txsch->schq.max; schq++) {
2846 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2847 			continue;
2848 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2849 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2850 		if (req->update_minlen)
2851 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2852 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2853 	}
2854 	mutex_unlock(&rvu->rsrc_lock);
2855 
2856 rx_frscfg:
2857 	/* Check if config is for SDP link */
2858 	if (req->sdp_link) {
2859 		if (!hw->sdp_links)
2860 			return NIX_AF_ERR_RX_LINK_INVALID;
2861 		link = hw->cgx_links + hw->lbk_links;
2862 		goto linkcfg;
2863 	}
2864 
2865 	/* Check if the request is from a CGX-mapped RVU PF */
2866 	if (is_pf_cgxmapped(rvu, pf)) {
2867 		/* Get CGX and LMAC to which this PF is mapped and find link */
2868 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2869 		link = (cgx * hw->lmac_per_cgx) + lmac;
2870 	} else if (pf == 0) {
2871 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
2872 		link = hw->cgx_links;
2873 	}
2874 
2875 	if (link < 0)
2876 		return NIX_AF_ERR_RX_LINK_INVALID;
2877 
2878 	nix_find_link_frs(rvu, req, pcifunc);
2879 
2880 linkcfg:
2881 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2882 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2883 	if (req->update_minlen)
2884 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
2885 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2886 
2887 	if (req->sdp_link || pf == 0)
2888 		return 0;
2889 
2890 	/* Update transmit credits for CGX links */
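	/* The credit field programmed below is in units of 16 bytes.
	 * Purely illustrative arithmetic: if this LMAC's FIFO share were
	 * 16 KB and maxlen were 1518, the value would be
	 * (16384 - 1518) / 16 = 929 credits.
	 */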
2891 	lmac_fifo_len =
2892 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2893 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2894 	cfg &= ~(0xFFFFFULL << 12);
2895 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2896 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2897 	return 0;
2898 }
2899 
2900 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2901 				      struct msg_rsp *rsp)
2902 {
2903 	struct npc_mcam_alloc_entry_req alloc_req = { };
2904 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2905 	struct npc_mcam_free_entry_req free_req = { };
2906 	u16 pcifunc = req->hdr.pcifunc;
2907 	int blkaddr, nixlf, err;
2908 	struct rvu_pfvf *pfvf;
2909 
2910 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
2911 	 * skip allocating rxvlan for them.
2912 	 */
2913 	if (is_afvf(pcifunc))
2914 		return 0;
2915 
2916 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2917 	if (pfvf->rxvlan)
2918 		return 0;
2919 
2920 	/* alloc new mcam entry */
2921 	alloc_req.hdr.pcifunc = pcifunc;
2922 	alloc_req.count = 1;
2923 
2924 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2925 						    &alloc_rsp);
2926 	if (err)
2927 		return err;
2928 
2929 	/* update entry to enable rxvlan offload */
2930 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2931 	if (blkaddr < 0) {
2932 		err = NIX_AF_ERR_AF_LF_INVALID;
2933 		goto free_entry;
2934 	}
2935 
2936 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2937 	if (nixlf < 0) {
2938 		err = NIX_AF_ERR_AF_LF_INVALID;
2939 		goto free_entry;
2940 	}
2941 
2942 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2943 	/* all it means is that rxvlan_index is valid */
2944 	pfvf->rxvlan = true;
2945 
2946 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2947 	if (err)
2948 		goto free_entry;
2949 
2950 	return 0;
2951 free_entry:
2952 	free_req.hdr.pcifunc = pcifunc;
2953 	free_req.entry = alloc_rsp.entry_list[0];
2954 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2955 	pfvf->rxvlan = false;
2956 	return err;
2957 }
2958 
2959 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2960 				    struct msg_rsp *rsp)
2961 {
2962 	int nixlf, blkaddr, err;
2963 	u64 cfg;
2964 
2965 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2966 	if (err)
2967 		return err;
2968 
2969 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2970 	/* Set the interface configuration */
2971 	if (req->len_verify & BIT(0))
2972 		cfg |= BIT_ULL(41);
2973 	else
2974 		cfg &= ~BIT_ULL(41);
2975 
2976 	if (req->len_verify & BIT(1))
2977 		cfg |= BIT_ULL(40);
2978 	else
2979 		cfg &= ~BIT_ULL(40);
2980 
2981 	if (req->csum_verify & BIT(0))
2982 		cfg |= BIT_ULL(37);
2983 	else
2984 		cfg &= ~BIT_ULL(37);
2985 
2986 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2987 
2988 	return 0;
2989 }
2990 
2991 static void nix_link_config(struct rvu *rvu, int blkaddr)
2992 {
2993 	struct rvu_hwinfo *hw = rvu->hw;
2994 	int cgx, lmac_cnt, slink, link;
2995 	u64 tx_credits;
2996 
2997 	/* Set default min/max packet lengths allowed on NIX Rx links.
2998 	 *
2999 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3000 	 * as undersized and report them to SW as error pkts, hence
3001 	 * it is set to 40 bytes here.
3002 	 */
3003 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
3004 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3005 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3006 	}
3007 
3008 	if (hw->sdp_links) {
3009 		link = hw->cgx_links + hw->lbk_links;
3010 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3011 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3012 	}
3013 
3014 	/* Set credits for Tx links assuming max packet length allowed.
3015 	 * This will be reconfigured based on MTU set for PF/VF.
3016 	 */
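	/* NIX_AF_TX_LINKX_NORM_CREDIT packing used below: byte credits (in
	 * 16-byte units) go above bit 12, 0x1FF << 2 sets the packet-count
	 * credits to the maximum, and BIT(1) enables the credits.
	 */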
3017 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3018 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3019 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
3020 		/* Enable credits and set credit pkt count to max allowed */
3021 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3022 		slink = cgx * hw->lmac_per_cgx;
3023 		for (link = slink; link < (slink + lmac_cnt); link++) {
3024 			rvu_write64(rvu, blkaddr,
3025 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3026 				    tx_credits);
3027 		}
3028 	}
3029 
3030 	/* Set Tx credits for LBK link */
3031 	slink = hw->cgx_links;
3032 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3033 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
3034 		/* Enable credits and set credit pkt count to max allowed */
3035 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3036 		rvu_write64(rvu, blkaddr,
3037 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3038 	}
3039 }
3040 
3041 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3042 {
3043 	int idx, err;
3044 	u64 status;
3045 
3046 	/* Start X2P bus calibration */
3047 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3048 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3049 	/* Wait for calibration to complete */
3050 	err = rvu_poll_reg(rvu, blkaddr,
3051 			   NIX_AF_STATUS, BIT_ULL(10), false);
3052 	if (err) {
3053 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3054 		return err;
3055 	}
3056 
3057 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3058 	/* Check if CGX devices are ready */
3059 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3060 		/* Skip when cgx port is not available */
3061 		if (!rvu_cgx_pdata(idx, rvu) ||
3062 		    (status & (BIT_ULL(16 + idx))))
3063 			continue;
3064 		dev_err(rvu->dev,
3065 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3066 		err = -EBUSY;
3067 	}
3068 
3069 	/* Check if LBK is ready */
3070 	if (!(status & BIT_ULL(19))) {
3071 		dev_err(rvu->dev,
3072 			"LBK didn't respond to NIX X2P calibration\n");
3073 		err = -EBUSY;
3074 	}
3075 
3076 	/* Clear 'calibrate_x2p' bit */
3077 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3078 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3079 	if (err || (status & 0x3FFULL))
3080 		dev_err(rvu->dev,
3081 			"NIX X2P calibration failed, status 0x%llx\n", status);
3082 	if (err)
3083 		return err;
3084 	return 0;
3085 }
3086 
3087 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3088 {
3089 	u64 cfg;
3090 	int err;
3091 
3092 	/* Set admin queue endianness */
3093 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3094 #ifdef __BIG_ENDIAN
3095 	cfg |= BIT_ULL(8);
3096 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3097 #else
3098 	cfg &= ~BIT_ULL(8);
3099 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3100 #endif
3101 
3102 	/* Do not bypass NDC cache */
3103 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3104 	cfg &= ~0x3FFEULL;
3105 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3106 	/* Disable caching of SQB aka SQEs */
3107 	cfg |= 0x04ULL;
3108 #endif
3109 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3110 
3111 	/* Result structure can be followed by RQ/SQ/CQ context at
3112 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
3113 	 * operation type. Alloc sufficient result memory for all operations.
3114 	 */
3115 	err = rvu_aq_alloc(rvu, &block->aq,
3116 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3117 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3118 	if (err)
3119 		return err;
3120 
3121 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3122 	rvu_write64(rvu, block->addr,
3123 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3124 	return 0;
3125 }
3126 
3127 int rvu_nix_init(struct rvu *rvu)
3128 {
3129 	const struct npc_lt_def_cfg *ltdefs;
3130 	struct rvu_hwinfo *hw = rvu->hw;
3131 	struct rvu_block *block;
3132 	int blkaddr, err;
3133 	u64 cfg;
3134 
3135 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3136 	if (blkaddr < 0)
3137 		return 0;
3138 	block = &hw->block[blkaddr];
3139 
3140 	if (is_rvu_96xx_B0(rvu)) {
3141 		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3142 		 * internal state when conditional clocks are turned off.
3143 		 * Hence enable them.
3144 		 */
3145 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3146 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3147 
3148 		/* Set chan/link to backpressure TL3 instead of TL2 */
3149 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3150 
3151 		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
3152 		 * This sticky mode is known to cause SQ stalls when multiple
3153 		 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3154 		 */
3155 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3156 		cfg &= ~BIT_ULL(15);
3157 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3158 	}
3159 
3160 	ltdefs = rvu->kpu.lt_def;
3161 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3162 	err = nix_calibrate_x2p(rvu, blkaddr);
3163 	if (err)
3164 		return err;
3165 
3166 	/* Set num of links of each type */
3167 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3168 	hw->cgx = (cfg >> 12) & 0xF;
3169 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
3170 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
3171 	hw->lbk_links = (cfg >> 24) & 0xF;
3172 	hw->sdp_links = 1;
3173 
3174 	/* Initialize admin queue */
3175 	err = nix_aq_init(rvu, block);
3176 	if (err)
3177 		return err;
3178 
3179 	/* Restore CINT timer delay to HW reset values */
3180 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3181 
3182 	if (blkaddr == BLKADDR_NIX0) {
3183 		hw->nix0 = devm_kzalloc(rvu->dev,
3184 					sizeof(struct nix_hw), GFP_KERNEL);
3185 		if (!hw->nix0)
3186 			return -ENOMEM;
3187 
3188 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3189 		if (err)
3190 			return err;
3191 
3192 		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3193 		if (err)
3194 			return err;
3195 
3196 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3197 		if (err)
3198 			return err;
3199 
3200 		/* Configure segmentation offload formats */
3201 		nix_setup_lso(rvu, hw->nix0, blkaddr);
3202 
3203 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3204 		 * This helps HW protocol checker to identify headers
3205 		 * and validate length and checksums.
3206 		 */
3207 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3208 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3209 			    ltdefs->rx_ol2.ltype_mask);
3210 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3211 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3212 			    ltdefs->rx_oip4.ltype_mask);
3213 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3214 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3215 			    ltdefs->rx_iip4.ltype_mask);
3216 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3217 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3218 			    ltdefs->rx_oip6.ltype_mask);
3219 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3220 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3221 			    ltdefs->rx_iip6.ltype_mask);
3222 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3223 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3224 			    ltdefs->rx_otcp.ltype_mask);
3225 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3226 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3227 			    ltdefs->rx_itcp.ltype_mask);
3228 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3229 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3230 			    ltdefs->rx_oudp.ltype_mask);
3231 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3232 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3233 			    ltdefs->rx_iudp.ltype_mask);
3234 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3235 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3236 			    ltdefs->rx_osctp.ltype_mask);
3237 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3238 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3239 			    ltdefs->rx_isctp.ltype_mask);
3240 
3241 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3242 		if (err)
3243 			return err;
3244 
3245 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3246 		nix_link_config(rvu, blkaddr);
3247 
3248 		/* Enable Channel backpressure */
3249 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3250 	}
3251 	return 0;
3252 }
3253 
3254 void rvu_nix_freemem(struct rvu *rvu)
3255 {
3256 	struct rvu_hwinfo *hw = rvu->hw;
3257 	struct rvu_block *block;
3258 	struct nix_txsch *txsch;
3259 	struct nix_mcast *mcast;
3260 	struct nix_hw *nix_hw;
3261 	int blkaddr, lvl;
3262 
3263 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3264 	if (blkaddr < 0)
3265 		return;
3266 
3267 	block = &hw->block[blkaddr];
3268 	rvu_aq_free(rvu, block->aq);
3269 
3270 	if (blkaddr == BLKADDR_NIX0) {
3271 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3272 		if (!nix_hw)
3273 			return;
3274 
3275 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3276 			txsch = &nix_hw->txsch[lvl];
3277 			kfree(txsch->schq.bmap);
3278 		}
3279 
3280 		mcast = &nix_hw->mcast;
3281 		qmem_free(rvu->dev, mcast->mce_ctx);
3282 		qmem_free(rvu->dev, mcast->mcast_buf);
3283 		mutex_destroy(&mcast->mce_lock);
3284 	}
3285 }
3286 
3287 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3288 				     struct msg_rsp *rsp)
3289 {
3290 	u16 pcifunc = req->hdr.pcifunc;
3291 	int nixlf, err;
3292 
3293 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3294 	if (err)
3295 		return err;
3296 
3297 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3298 
3299 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3300 }
3301 
3302 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3303 				    struct msg_rsp *rsp)
3304 {
3305 	u16 pcifunc = req->hdr.pcifunc;
3306 	int nixlf, err;
3307 
3308 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3309 	if (err)
3310 		return err;
3311 
3312 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3313 
3314 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3315 }
3316 
3317 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3318 {
3319 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3320 	struct hwctx_disable_req ctx_req;
3321 	int err;
3322 
3323 	ctx_req.hdr.pcifunc = pcifunc;
3324 
3325 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3326 	nix_interface_deinit(rvu, pcifunc, nixlf);
3327 	nix_rx_sync(rvu, blkaddr);
3328 	nix_txschq_free(rvu, pcifunc);
3329 
3330 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3331 
3332 	if (pfvf->sq_ctx) {
3333 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3334 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3335 		if (err)
3336 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3337 	}
3338 
3339 	if (pfvf->rq_ctx) {
3340 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3341 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3342 		if (err)
3343 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3344 	}
3345 
3346 	if (pfvf->cq_ctx) {
3347 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3348 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3349 		if (err)
3350 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3351 	}
3352 
3353 	nix_ctx_free(rvu, pfvf);
3354 }
3355 
3356 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3357 
3358 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3359 {
3360 	struct rvu_hwinfo *hw = rvu->hw;
3361 	struct rvu_block *block;
3362 	int blkaddr;
3363 	int nixlf;
3364 	u64 cfg;
3365 
3366 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3367 	if (blkaddr < 0)
3368 		return NIX_AF_ERR_AF_LF_INVALID;
3369 
3370 	block = &hw->block[blkaddr];
3371 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3372 	if (nixlf < 0)
3373 		return NIX_AF_ERR_AF_LF_INVALID;
3374 
3375 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3376 
3377 	if (enable)
3378 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3379 	else
3380 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3381 
3382 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3383 
3384 	return 0;
3385 }
3386 
3387 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3388 					  struct msg_rsp *rsp)
3389 {
3390 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3391 }
3392 
3393 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3394 					   struct msg_rsp *rsp)
3395 {
3396 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3397 }
3398 
3399 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3400 					struct nix_lso_format_cfg *req,
3401 					struct nix_lso_format_cfg_rsp *rsp)
3402 {
3403 	u16 pcifunc = req->hdr.pcifunc;
3404 	struct nix_hw *nix_hw;
3405 	struct rvu_pfvf *pfvf;
3406 	int blkaddr, idx, f;
3407 	u64 reg;
3408 
3409 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3410 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3411 	if (!pfvf->nixlf || blkaddr < 0)
3412 		return NIX_AF_ERR_AF_LF_INVALID;
3413 
3414 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3415 	if (!nix_hw)
3416 		return -EINVAL;
3417 
3418 	/* Find existing matching LSO format, if any */
3419 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3420 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3421 			reg = rvu_read64(rvu, blkaddr,
3422 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3423 			if (req->fields[f] != (reg & req->field_mask))
3424 				break;
3425 		}
3426 
3427 		if (f == NIX_LSO_FIELD_MAX)
3428 			break;
3429 	}
3430 
3431 	if (idx < nix_hw->lso.in_use) {
3432 		/* Match found */
3433 		rsp->lso_format_idx = idx;
3434 		return 0;
3435 	}
3436 
3437 	if (nix_hw->lso.in_use == nix_hw->lso.total)
3438 		return NIX_AF_ERR_LSO_CFG_FAIL;
3439 
3440 	rsp->lso_format_idx = nix_hw->lso.in_use++;
3441 
3442 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3443 		rvu_write64(rvu, blkaddr,
3444 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3445 			    req->fields[f]);
3446 
3447 	return 0;
3448 }
3449