/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue);

/* General service functions */
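/* Record the VF's parent PF in the internal memory of all four storm
 * processors (X/C/T/U); each storm keeps a per-function VF-to-PF byte,
 * indexed here by the VF's absolute function id.
 */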
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

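/* Ack a VF status block on the VF's behalf through the GRC interface to
 * the IGU command registers: the data word carries the SB index, segment
 * and interrupt op, while the control word encodes the target address
 * and the VF's function id. As an illustrative example, for igu_sb_id 10
 * the address field is IGU_CMD_E2_PROD_UPD_BASE + 10, shifted into place
 * by IGU_CTRL_REG_ADDRESS_SHIFT.
 */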
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct bnx2x_queue_init_params *init_params,
			    struct bnx2x_queue_setup_params *setup_params,
			    u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

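/* Prepare the ramrod parameters for both transitions of a VF queue
 * 'construction' - INIT and SETUP - translating the VF's resources
 * (status block, CDU context, classification ids) into the parameter
 * structures consumed by the queue state machine.
 */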
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
	setup_p->gen_params.fp_hsi = vf->fp_hsi;

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	if (vf->spoofchk)
		__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
	else
		__clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

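/* Drive the queue through its 'construction' ramrods - INIT followed by
 * SETUP - then ack the queue's status block to enable interrupts. If the
 * queue is already logically active, the operation is skipped rather
 * than failed.
 */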
static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

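/* Tear a queue down by walking the reverse state sequence:
 * HALT -> TERMINATE -> CFC_DEL. A queue already in the STOPPED logical
 * state skips the ramrods and only has its CDU context fields cleared.
 */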
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

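/* Delete all classification rules of a given type (MAC, VLAN or
 * VLAN-MAC pair) configured on a VF queue, optionally clearing only the
 * driver's bookkeeping (drv_only) without waiting on ramrod completion.
 */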
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, int type)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
	} else if (type == BNX2X_VF_FILTER_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
		return rc;
	}

	return 0;
}

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc == -EEXIST)
		return 0;
	if (rc) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
				"VLAN-MAC" :
			  (filter->type == BNX2X_VF_FILTER_MAC) ?
				"MAC" : "VLAN");
		return rc;
	}

	filter->applied = true;

	return 0;
}

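/* Apply a list of MAC/VLAN filters to a VF queue. On a mid-list failure,
 * previously applied entries are rolled back by replaying them with the
 * add/del sense inverted. The filters array is freed here in all cases,
 * so the caller must not touch it afterwards.
 */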
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count);
		while (--i >= 0) {
			if (!filters->filters[i].applied)
				continue;
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Schedule the configuration of any pending vlan filters */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

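/* Per-queue FLR cleanup: for the leading queue, first flush the
 * classification DB (driver-only; the device side is assumed to have
 * been cleared by the FLR itself), then terminate the queue unless it
 * is already in the RESET state.
 */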
static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			       int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN_MAC);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_MAC);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

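/* Replace the VF's multicast configuration: a non-empty list is applied
 * with BNX2X_MCAST_CMD_SET, while mc_num == 0 deletes the existing
 * multicast configuration instead.
 */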
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
		if (rc)
			BNX2X_ERR("Failed to set multicasts\n");
	} else {
		/* clear existing mcasts */
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
		if (rc)
			BNX2X_ERR("Failed to remove multicasts\n");
	}

	kfree(mc);

	return rc;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

/* VF enable primitives.
 * When pretend is required, the caller is responsible for calling
 * pretend prior to calling these routines.
 */

/* Internal vf enable - until the vf is enabled internally, all
 * transactions are blocked. This routine should always be called
 * last, with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
	    break;
	case 1:
	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
	    break;
	case 2:
	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
	    break;
	case 3:
	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
	    break;
	}
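	/* Worked example (illustrative): on path 0, abs_vfid 5 gives
	 * was_err_group = (2 * 0 + 5) >> 5 = 0, so bit (5 & 0x1f) = 5 of
	 * PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR is written below.
	 */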
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

714 
bnx2x_vf_igu_reset(struct bnx2x * bp,struct bnx2x_virtf * vf)715 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
716 {
717 	int i;
718 	u32 val;
719 
720 	/* Set VF masks and configuration - pretend */
721 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
722 
723 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
724 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
725 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
726 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
727 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
728 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
729 
730 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
731 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
732 	val &= ~IGU_VF_CONF_PARENT_MASK;
733 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
734 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
735 
736 	DP(BNX2X_MSG_IOV,
737 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
738 	   vf->abs_vfid, val);
739 
740 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
741 
742 	/* iterate over all queues, clear sb consumer */
743 	for (i = 0; i < vf_sb_count(vf); i++) {
744 		u8 igu_sb_id = vf_igu_sb(vf, i);
745 
746 		/* zero prod memory */
747 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
748 
749 		/* clear sb state machine */
750 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
751 				       false /* VF */);
752 
753 		/* disable + update */
754 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
755 				    IGU_INT_DISABLE, 1);
756 	}
757 }
758 
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	u16 abs_fid;

	abs_fid = FW_VF_HANDLE(abs_vfid);

	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
	storm_memset_func_en(bp, abs_fid, 1);

	/* Invalidate fp_hsi version for vfs */
	if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
		REG_WR8(bp, BAR_XSTRORM_INTMEM +
			    XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

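/* VF FLR flow: flush every rx queue, drop multicasts, run the HW cleanup
 * (DQ drain, final cleanup ramrod, TX flush), release the VF's resources
 * and finally re-open its mailbox.
 */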
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	vf->malicious = false;

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen, for example, in KVM, where an 'all ones'
	 * FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet would be if the MCP implemented
	 * write-to-clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
	 * to the PF L2 queues.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the amount
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}


void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;

	return pci_domain_nr(dev->bus);
}

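/* Standard SR-IOV routing-ID arithmetic: VF n sits at the PF's devfn +
 * offset + stride * n, with overflow past 0xff spilling into the bus
 * number. Illustrative example (hypothetical values): devfn 0x0,
 * offset 8 and stride 2 put VF 3 at 0x0 + 8 + 2 * 3 = 0xe, i.e. devfn
 * 0xe on the same bus.
 */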
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

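/* Scan the IGU CAM for status blocks that belong to this PF's VFs. The
 * CAM is assumed to list a PF entry before the entries of its VFs, so
 * ownership of a VF entry is inferred from the most recently seen PF
 * entry (current_pf).
 */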
static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
}


static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* Read the SRIOV capability structure.
	 * The fields can be read via configuration space reads or
	 * directly from the device (starting at offset PCICFG_OFFSET).
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!pci_ari_enabled(bp->pdev->bus)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information; however, the sriov structure is for the private use
	 * of the pci module. Also, we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0) {
		err = 0;
		goto failed;
	}

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
				sizeof(struct bnx2x_virtf),
				GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
		/* enable spoofchk by default */
		bnx2x_vf(bp, i, spoofchk) = 1;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");
		err = -EINVAL;
		goto failed;
	}

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
				 sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	if (SHMEM2_HAS(bp, sriov_switch_mode))
		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}


void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}


void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];

		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

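/* Allocate the IOV DMA areas: the VF CDU contexts (split over
 * ILT-page-sized chunks), the slowpath ramrod data, the PF-VF mailboxes
 * and the bulletin boards. Sizes scale with the number of VFs enabled.
 */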
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	if (supported &
	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
		return 20000;

	return 10000; /* assume lowest supported speed is 10G */
}

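/* Propagate link state to a VF through its bulletin board, honoring the
 * per-VF link configuration: AUTO mirrors the PF's last reported link,
 * DISABLE forces link down, and ENABLE forces link up at the maximal
 * supported speed.
 */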
int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
{
	struct bnx2x_link_report_data *state = &bp->last_reported_link;
	struct pf_vf_bulletin_content *bulletin;
	struct bnx2x_virtf *vf;
	bool update = true;
	int rc = 0;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;

		bulletin->link_speed = state->line_speed;
		bulletin->link_flags = 0;
		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_speed = bnx2x_max_speed_cap(bp);
		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
	} else {
		update = false;
	}

	if (update) {
		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
		   "vf %d mode %u speed %d flags %x\n", idx,
		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

		/* Post update on VF's bulletin board */
		rc = bnx2x_post_vf_bulletin(bp, idx);
		if (rc) {
			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
			goto out;
		}
	}

out:
	mutex_unlock(&bp->vfdb->bulletin_mutex);
	return rc;
}

int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, idx);

	if (!vf)
		return -EINVAL;

	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */

	vf->link_cfg = link_state;

	return bnx2x_iov_link_update_vf(bp, idx);
}

void bnx2x_iov_link_update(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vfid)
		bnx2x_iov_link_update_vf(bp, vfid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */
		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
				       vf_vlan_rules_cnt(vf));
		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
				       vf_mac_rules_cnt(vf));

		/*  init mcast object - This object will be re-initialized
		 *  during VF-ACQUIRE with the proper cl_id and cid.
		 *  It needs to be initialized here so that it can be safely
		 *  handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->domain = bnx2x_vf_domain(bp, vfid);
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}
1640 
1641 /* called by bnx2x_init_hw_func, returns the next ilt line */
1642 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1643 {
1644 	int i;
1645 	struct bnx2x_ilt *ilt = BP_ILT(bp);
1646 
1647 	if (!IS_SRIOV(bp))
1648 		return line;
1649 
1650 	/* set vfs ilt lines */
1651 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1652 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1653 
1654 		ilt->lines[line+i].page = hw_cxt->addr;
1655 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1656 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1657 	}
1658 	return line + i;
1659 }
1660 
1661 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1662 {
1663 	return ((cid >= BNX2X_FIRST_VF_CID) &&
1664 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1665 }
1666 
1667 static
1668 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1669 					struct bnx2x_vf_queue *vfq,
1670 					union event_ring_elem *elem)
1671 {
1672 	unsigned long ramrod_flags = 0;
1673 	int rc = 0;
1674 	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1675 
1676 	/* Always push next commands out, don't wait here */
1677 	set_bit(RAMROD_CONT, &ramrod_flags);
1678 
1679 	switch (echo >> BNX2X_SWCID_SHIFT) {
1680 	case BNX2X_FILTER_MAC_PENDING:
1681 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1682 					   &ramrod_flags);
1683 		break;
1684 	case BNX2X_FILTER_VLAN_PENDING:
1685 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1686 					    &ramrod_flags);
1687 		break;
1688 	default:
1689 		BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1690 		return;
1691 	}
1692 	if (rc < 0)
1693 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1694 	else if (rc > 0)
1695 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1696 }
1697 
1698 static
1699 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1700 			       struct bnx2x_virtf *vf)
1701 {
1702 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
1703 	int rc;
1704 
1705 	rparam.mcast_obj = &vf->mcast_obj;
1706 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1707 
1708 	/* If there are pending mcast commands - send them */
1709 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1710 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1711 		if (rc < 0)
1712 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1713 				  rc);
1714 	}
1715 }
1716 
1717 static
1718 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1719 				 struct bnx2x_virtf *vf)
1720 {
1721 	smp_mb__before_atomic();
1722 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1723 	smp_mb__after_atomic();
1724 }
1725 
1726 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1727 					   struct bnx2x_virtf *vf)
1728 {
1729 	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1730 }
1731 
1732 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1733 {
1734 	struct bnx2x_virtf *vf;
1735 	int qidx = 0, abs_vfid;
1736 	u8 opcode;
1737 	u16 cid = 0xffff;
1738 
1739 	if (!IS_SRIOV(bp))
1740 		return 1;
1741 
1742 	/* first get the cid - the only events we handle here are cfc-delete
1743 	 * and set-mac completion
1744 	 */
1745 	opcode = elem->message.opcode;
1746 
1747 	switch (opcode) {
1748 	case EVENT_RING_OPCODE_CFC_DEL:
1749 		cid = SW_CID(elem->message.data.cfc_del_event.cid);
1750 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1751 		break;
1752 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1753 	case EVENT_RING_OPCODE_MULTICAST_RULES:
1754 	case EVENT_RING_OPCODE_FILTERS_RULES:
1755 	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1756 		cid = SW_CID(elem->message.data.eth_event.echo);
1757 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1758 		break;
1759 	case EVENT_RING_OPCODE_VF_FLR:
1760 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
1761 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1762 		   abs_vfid);
1763 		goto get_vf;
1764 	case EVENT_RING_OPCODE_MALICIOUS_VF:
1765 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1766 		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1767 			  abs_vfid,
1768 			  elem->message.data.malicious_vf_event.err_id);
1769 		goto get_vf;
1770 	default:
1771 		return 1;
1772 	}
1773 
1774 	/* check if the cid is in the VF range */
1775 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1776 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1777 		return 1;
1778 	}
1779 
1780 	/* extract vf and rxq index from vf_cid - relies on the following:
1781 	 * 1. vfid on cid reflects the true abs_vfid
1782 	 * 2. The max number of VFs (per path) is 64
1783 	 */
1784 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1785 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1786 get_vf:
1787 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1788 
1789 	if (!vf) {
1790 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1791 			  cid, abs_vfid);
1792 		return 0;
1793 	}
1794 
1795 	switch (opcode) {
1796 	case EVENT_RING_OPCODE_CFC_DEL:
1797 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1798 		   vf->abs_vfid, qidx);
1799 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1800 						       &vfq_get(vf,
1801 								qidx)->sp_obj,
1802 						       BNX2X_Q_CMD_CFC_DEL);
1803 		break;
1804 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1805 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1806 		   vf->abs_vfid, qidx);
1807 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1808 		break;
1809 	case EVENT_RING_OPCODE_MULTICAST_RULES:
1810 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1811 		   vf->abs_vfid, qidx);
1812 		bnx2x_vf_handle_mcast_eqe(bp, vf);
1813 		break;
1814 	case EVENT_RING_OPCODE_FILTERS_RULES:
1815 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1816 		   vf->abs_vfid, qidx);
1817 		bnx2x_vf_handle_filters_eqe(bp, vf);
1818 		break;
1819 	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1820 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1821 		   vf->abs_vfid, qidx);
1822 		bnx2x_vf_handle_rss_update_eqe(bp, vf);
1823 		fallthrough;
1824 	case EVENT_RING_OPCODE_VF_FLR:
1825 		/* Do nothing for now */
1826 		return 0;
1827 	case EVENT_RING_OPCODE_MALICIOUS_VF:
1828 		vf->malicious = true;
1829 		return 0;
1830 	}
1831 
1832 	return 0;
1833 }
1834 
1835 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1836 {
1837 	/* extract the vf from vf_cid - relies on the following:
1838 	 * 1. vfid on cid reflects the true abs_vfid
1839 	 * 2. The max number of VFs (per path) is 64
1840 	 */
1841 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1842 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1843 }
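
/* Illustrative sketch (compiled out): how a VF cid decomposes under the
 * scheme described above. The low BNX2X_VF_CID_WND bits select the queue
 * within the VF; the bits above them select the abs_vfid. The values used
 * here are hypothetical.
 */
#if 0
static void bnx2x_vf_cid_decode_example(void)
{
	int vf_cid = (3 << BNX2X_VF_CID_WND) | 2; /* abs_vfid 3, queue 2 */
	int qidx = vf_cid & ((1 << BNX2X_VF_CID_WND) - 1);
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);

	/* here qidx == 2 and abs_vfid == 3 */
}
#endif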
1844 
1845 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1846 				struct bnx2x_queue_sp_obj **q_obj)
1847 {
1848 	struct bnx2x_virtf *vf;
1849 
1850 	if (!IS_SRIOV(bp))
1851 		return;
1852 
1853 	vf = bnx2x_vf_by_cid(bp, vf_cid);
1854 
1855 	if (vf) {
1856 		/* extract queue index from vf_cid - relies on the following:
1857 		 * 1. vfid on cid reflects the true abs_vfid
1858 		 * 2. The max number of VFs (per path) is 64
1859 		 */
1860 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1861 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1862 	} else {
1863 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1864 	}
1865 }
1866 
1867 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1868 {
1869 	int i;
1870 	int first_queue_query_index, num_queues_req;
1871 	dma_addr_t cur_data_offset;
1872 	struct stats_query_entry *cur_query_entry;
1873 	u8 stats_count = 0;
1874 	bool is_fcoe = false;
1875 
1876 	if (!IS_SRIOV(bp))
1877 		return;
1878 
1879 	if (!NO_FCOE(bp))
1880 		is_fcoe = true;
1881 
1882 	/* fcoe adds one global request and one queue request */
1883 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1884 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1885 		(is_fcoe ? 0 : 1);
1886 
1887 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1888 	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1889 	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1890 	       first_queue_query_index + num_queues_req);
1891 
1892 	cur_data_offset = bp->fw_stats_data_mapping +
1893 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1894 		num_queues_req * sizeof(struct per_queue_stats);
1895 
1896 	cur_query_entry = &bp->fw_stats_req->
1897 		query[first_queue_query_index + num_queues_req];
1898 
1899 	for_each_vf(bp, i) {
1900 		int j;
1901 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1902 
1903 		if (vf->state != VF_ENABLED) {
1904 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1905 			       "vf %d not enabled so no stats for it\n",
1906 			       vf->abs_vfid);
1907 			continue;
1908 		}
1909 
1910 		if (vf->malicious) {
1911 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1912 			       "vf %d malicious so no stats for it\n",
1913 			       vf->abs_vfid);
1914 			continue;
1915 		}
1916 
1917 		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1918 		       "add addresses for vf %d\n", vf->abs_vfid);
1919 		for_each_vfq(vf, j) {
1920 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1921 
1922 			dma_addr_t q_stats_addr =
1923 				vf->fw_stat_map + j * vf->stats_stride;
1924 
1925 			/* collect stats from active queues only */
1926 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1927 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
1928 				continue;
1929 
1930 			/* create stats query entry for this queue */
1931 			cur_query_entry->kind = STATS_TYPE_QUEUE;
1932 			cur_query_entry->index = vfq_stat_id(vf, rxq);
1933 			cur_query_entry->funcID =
1934 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1935 			cur_query_entry->address.hi =
1936 				cpu_to_le32(U64_HI(q_stats_addr));
1937 			cur_query_entry->address.lo =
1938 				cpu_to_le32(U64_LO(q_stats_addr));
1939 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1940 			       "added address %x %x for vf %d queue %d client %d\n",
1941 			       cur_query_entry->address.hi,
1942 			       cur_query_entry->address.lo,
1943 			       cur_query_entry->funcID,
1944 			       j, cur_query_entry->index);
1945 			cur_query_entry++;
1946 			cur_data_offset += sizeof(struct per_queue_stats);
1947 			stats_count++;
1948 
1949 			/* all stats are coalesced to the leading queue */
1950 			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1951 				break;
1952 		}
1953 	}
1954 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1955 }
1956 
1957 /* VF API helpers */
1958 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1959 				u8 enable)
1960 {
1961 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1962 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1963 
1964 	REG_WR(bp, reg, val);
1965 }
1966 
1967 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1968 {
1969 	int i;
1970 
1971 	for_each_vfq(vf, i)
1972 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1973 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
1974 }
1975 
1976 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1977 {
1978 	u32 val;
1979 
1980 	/* clear the VF configuration - pretend */
1981 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1982 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1983 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1984 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1985 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1986 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1987 }
1988 
1989 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1990 {
1991 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1992 		     BNX2X_VF_MAX_QUEUES);
1993 }
1994 
1995 static
1996 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1997 			    struct vf_pf_resc_request *req_resc)
1998 {
1999 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2000 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2001 
2002 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2003 		(req_resc->num_txqs <= txq_cnt) &&
2004 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2005 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2006 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2007 }
2008 
2009 /* CORE VF API */
2010 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2011 		     struct vf_pf_resc_request *resc)
2012 {
2013 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2014 		BNX2X_CIDS_PER_VF;
2015 
2016 	union cdu_context *base_cxt = (union cdu_context *)
2017 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2018 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2019 	int i;
2020 
2021 	/* if state is 'acquired' the VF was not released or FLR'd; in
2022 	 * this case the returned resources match the already acquired
2023 	 * resources. Verify that the requested numbers do not exceed
2024 	 * the already acquired numbers.
2025 	 */
2026 	if (vf->state == VF_ACQUIRED) {
2027 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2028 		   vf->abs_vfid);
2029 
2030 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2031 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
2032 				  vf->abs_vfid);
2033 			return -EINVAL;
2034 		}
2035 		return 0;
2036 	}
2037 
2038 	/* Otherwise vf state must be 'free' or 'reset' */
2039 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2040 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2041 			  vf->abs_vfid, vf->state);
2042 		return -EINVAL;
2043 	}
2044 
2045 	/* static allocation:
2046 	 * the global maximum numbers are fixed per VF. Fail the request if
2047 	 * the requested numbers exceed these globals.
2048 	 */
2049 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2050 		DP(BNX2X_MSG_IOV,
2051 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2052 		/* set the max resource in the vf */
2053 		return -ENOMEM;
2054 	}
2055 
2056 	/* Set resources counters - 0 request means max available */
2057 	vf_sb_count(vf) = resc->num_sbs;
2058 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2059 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2060 
2061 	DP(BNX2X_MSG_IOV,
2062 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2063 	   vf_sb_count(vf), vf_rxq_count(vf),
2064 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2065 	   vf_vlan_rules_cnt(vf));
2066 
2067 	/* Initialize the queues */
2068 	if (!vf->vfqs) {
2069 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2070 		return -EINVAL;
2071 	}
2072 
2073 	for_each_vfq(vf, i) {
2074 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2075 
2076 		if (!q) {
2077 			BNX2X_ERR("q number %d was not allocated\n", i);
2078 			return -EINVAL;
2079 		}
2080 
2081 		q->index = i;
2082 		q->cxt = &((base_cxt + i)->eth);
2083 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2084 
2085 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2086 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2087 
2088 		/* init SP objects */
2089 		bnx2x_vfq_init(bp, vf, q);
2090 	}
2091 	vf->state = VF_ACQUIRED;
2092 	return 0;
2093 }
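
/* Minimal caller sketch (compiled out): one way a PF-side flow could
 * drive bnx2x_vf_acquire(). The resource numbers are hypothetical; a
 * queue count of 0 in the request means "grant the maximum available".
 */
#if 0
static int bnx2x_vf_acquire_example(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request resc = {0};

	resc.num_rxqs = 0;		/* 0 == max available */
	resc.num_txqs = 0;		/* 0 == max available */
	resc.num_sbs = vf_sb_count(vf);
	resc.num_mac_filters = 1;
	resc.num_vlan_filters = 1;

	return bnx2x_vf_acquire(bp, vf, &resc);
}
#endif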
2094 
2095 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2096 {
2097 	struct bnx2x_func_init_params func_init = {0};
2098 	int i;
2099 
2100 	/* the sb resources are initialized at this point, do the
2101 	 * FW/HW initializations
2102 	 */
2103 	for_each_vf_sb(vf, i)
2104 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2105 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2106 
2107 	/* Sanity checks */
2108 	if (vf->state != VF_ACQUIRED) {
2109 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2110 		   vf->abs_vfid, vf->state);
2111 		return -EINVAL;
2112 	}
2113 
2114 	/* let FLR complete ... */
2115 	msleep(100);
2116 
2117 	/* FLR cleanup epilogue */
2118 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2119 		return -EBUSY;
2120 
2121 	/* reset IGU VF statistics: MSIX */
2122 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2123 
2124 	/* function setup */
2125 	func_init.pf_id = BP_FUNC(bp);
2126 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2127 	bnx2x_func_init(bp, &func_init);
2128 
2129 	/* Enable the vf */
2130 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2131 	bnx2x_vf_enable_traffic(bp, vf);
2132 
2133 	/* queue protection table */
2134 	for_each_vfq(vf, i)
2135 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2136 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2137 
2138 	vf->state = VF_ENABLED;
2139 
2140 	/* update vf bulletin board */
2141 	bnx2x_post_vf_bulletin(bp, vf->index);
2142 
2143 	return 0;
2144 }
2145 
2146 struct set_vf_state_cookie {
2147 	struct bnx2x_virtf *vf;
2148 	u8 state;
2149 };
2150 
2151 static void bnx2x_set_vf_state(void *cookie)
2152 {
2153 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2154 
2155 	p->vf->state = p->state;
2156 }
2157 
2158 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2159 {
2160 	int rc = 0, i;
2161 
2162 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2163 
2164 	/* Close all queues */
2165 	for (i = 0; i < vf_rxq_count(vf); i++) {
2166 		rc = bnx2x_vf_queue_teardown(bp, vf, i);
2167 		if (rc)
2168 			goto op_err;
2169 	}
2170 
2171 	/* disable the interrupts */
2172 	DP(BNX2X_MSG_IOV, "disabling igu\n");
2173 	bnx2x_vf_igu_disable(bp, vf);
2174 
2175 	/* disable the VF */
2176 	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2177 	bnx2x_vf_clr_qtbl(bp, vf);
2178 
2179 	/* need to make sure there are no outstanding stats ramrods which may
2180 	 * cause the device to access the VF's stats buffer, which the VF will
2181 	 * free as soon as we return from the close flow.
2182 	 */
2183 	{
2184 		struct set_vf_state_cookie cookie;
2185 
2186 		cookie.vf = vf;
2187 		cookie.state = VF_ACQUIRED;
2188 		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2189 		if (rc)
2190 			goto op_err;
2191 	}
2192 
2193 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2194 
2195 	return 0;
2196 op_err:
2197 	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2198 	return rc;
2199 }
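
/* The cookie/callback pair above is an instance of the generic
 * bnx2x_stats_safe_exec() pattern: package a state change in a cookie
 * and let the stats code apply it while no stats ramrod is in flight.
 * A sketch of the same pattern for another (hypothetical) field:
 */
#if 0
struct set_vf_link_cookie {
	struct bnx2x_virtf *vf;
	int link_cfg;
};

static void bnx2x_set_vf_link(void *cookie)
{
	struct set_vf_link_cookie *p = cookie;

	p->vf->link_cfg = p->link_cfg;
	/* caller: bnx2x_stats_safe_exec(bp, bnx2x_set_vf_link, &cookie); */
}
#endif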
2200 
2201 /* VF release can be called in either of two cases: 1. The VF was
2202  * acquired but not enabled. 2. The VF was enabled, or was in the
2203  * process of being enabled.
2204  */
2205 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2206 {
2207 	int rc;
2208 
2209 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2210 	   vf->state == VF_FREE ? "Free" :
2211 	   vf->state == VF_ACQUIRED ? "Acquired" :
2212 	   vf->state == VF_ENABLED ? "Enabled" :
2213 	   vf->state == VF_RESET ? "Reset" :
2214 	   "Unknown");
2215 
2216 	switch (vf->state) {
2217 	case VF_ENABLED:
2218 		rc = bnx2x_vf_close(bp, vf);
2219 		if (rc)
2220 			goto op_err;
2221 		fallthrough;	/* to release resources */
2222 	case VF_ACQUIRED:
2223 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2224 		bnx2x_vf_free_resc(bp, vf);
2225 		break;
2226 
2227 	case VF_FREE:
2228 	case VF_RESET:
2229 	default:
2230 		break;
2231 	}
2232 	return 0;
2233 op_err:
2234 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2235 	return rc;
2236 }
2237 
2238 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2239 			struct bnx2x_config_rss_params *rss)
2240 {
2241 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2242 	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2243 	return bnx2x_config_rss(bp, rss);
2244 }
2245 
2246 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2247 			struct vfpf_tpa_tlv *tlv,
2248 			struct bnx2x_queue_update_tpa_params *params)
2249 {
2250 	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2251 	struct bnx2x_queue_state_params qstate;
2252 	int qid, rc = 0;
2253 
2254 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2255 
2256 	/* Set ramrod params */
2257 	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2258 	memcpy(&qstate.params.update_tpa, params,
2259 	       sizeof(struct bnx2x_queue_update_tpa_params));
2260 	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2261 	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2262 
2263 	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2264 		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2265 		qstate.params.update_tpa.sge_map = sge_addr[qid];
2266 		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2267 		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2268 		   U64_LO(sge_addr[qid]));
2269 		rc = bnx2x_queue_state_change(bp, &qstate);
2270 		if (rc) {
2271 			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2272 				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2273 				  vf->abs_vfid, qid);
2274 			return rc;
2275 		}
2276 	}
2277 
2278 	return rc;
2279 }
2280 
2281 /* VF release ~ VF close + VF release-resources
2282  * Release is the ultimate SW shutdown and is called whenever an
2283  * irrecoverable error is encountered.
2284  */
2285 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2286 {
2287 	int rc;
2288 
2289 	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2290 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2291 
2292 	rc = bnx2x_vf_free(bp, vf);
2293 	if (rc)
2294 		WARN(rc,
2295 		     "VF[%d] Failed to release resources - rc=%d\n",
2296 		     vf->abs_vfid, rc);
2297 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2298 	return rc;
2299 }
2300 
2301 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2302 			      enum channel_tlvs tlv)
2303 {
2304 	/* we don't lock the channel for unsupported tlvs */
2305 	if (!bnx2x_tlv_supported(tlv)) {
2306 		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2307 		return;
2308 	}
2309 
2310 	/* lock the channel */
2311 	mutex_lock(&vf->op_mutex);
2312 
2313 	/* record the locking op */
2314 	vf->op_current = tlv;
2315 
2316 	/* log the lock */
2317 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2318 	   vf->abs_vfid, tlv);
2319 }
2320 
2321 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2322 				enum channel_tlvs expected_tlv)
2323 {
2324 	enum channel_tlvs current_tlv;
2325 
2326 	if (!vf) {
2327 		BNX2X_ERR("VF was %p\n", vf);
2328 		return;
2329 	}
2330 
2331 	current_tlv = vf->op_current;
2332 
2333 	/* we don't unlock the channel for unsupported tlvs */
2334 	if (!bnx2x_tlv_supported(expected_tlv))
2335 		return;
2336 
2337 	WARN(expected_tlv != vf->op_current,
2338 	     "lock mismatch: expected %d found %d", expected_tlv,
2339 	     vf->op_current);
2340 
2341 	/* clear the locking op */
2342 	vf->op_current = CHANNEL_TLV_NONE;
2343 
2344 	/* unlock the channel */
2345 	mutex_unlock(&vf->op_mutex);
2346 
2347 	/* log the unlock */
2348 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2349 	   vf->abs_vfid, current_tlv);
2350 }
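
/* Typical PF-side usage of the channel lock (a sketch; the TLV names
 * the operation being serialized against VF-originated flows):
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *	... operate on the VF's queue/filter objects ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 */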
2351 
2352 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2353 {
2354 	struct bnx2x_queue_state_params q_params;
2355 	u32 prev_flags;
2356 	int i, rc;
2357 
2358 	/* Verify changes are needed and record current Tx switching state */
2359 	prev_flags = bp->flags;
2360 	if (enable)
2361 		bp->flags |= TX_SWITCHING;
2362 	else
2363 		bp->flags &= ~TX_SWITCHING;
2364 	if (prev_flags == bp->flags)
2365 		return 0;
2366 
2367 	/* Verify state enables the sending of queue ramrods */
2368 	if ((bp->state != BNX2X_STATE_OPEN) ||
2369 	    (bnx2x_get_q_logical_state(bp,
2370 				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2371 	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
2372 		return 0;
2373 
2374 	/* send q. update ramrod to configure Tx switching */
2375 	memset(&q_params, 0, sizeof(q_params));
2376 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2377 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
2378 	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2379 		  &q_params.params.update.update_flags);
2380 	if (enable)
2381 		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2382 			  &q_params.params.update.update_flags);
2383 	else
2384 		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2385 			    &q_params.params.update.update_flags);
2386 
2387 	/* send the ramrod on all the queues of the PF */
2388 	for_each_eth_queue(bp, i) {
2389 		struct bnx2x_fastpath *fp = &bp->fp[i];
2390 		int tx_idx;
2391 
2392 		/* Set the appropriate Queue object */
2393 		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2394 
2395 		for (tx_idx = FIRST_TX_COS_INDEX;
2396 		     tx_idx < fp->max_cos; tx_idx++) {
2397 			q_params.params.update.cid_index = tx_idx;
2398 
2399 			/* Update the Queue state */
2400 			rc = bnx2x_queue_state_change(bp, &q_params);
2401 			if (rc) {
2402 				BNX2X_ERR("Failed to configure Tx switching\n");
2403 				return rc;
2404 			}
2405 		}
2406 	}
2407 
2408 	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2409 	return 0;
2410 }
2411 
2412 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2413 {
2414 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2415 
2416 	if (!IS_SRIOV(bp)) {
2417 		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2418 		return -EINVAL;
2419 	}
2420 
2421 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2422 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
2423 
2424 	/* HW channel is only operational when PF is up */
2425 	if (bp->state != BNX2X_STATE_OPEN) {
2426 		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2427 		return -EINVAL;
2428 	}
2429 
2430 	/* we are always bound by the total_vfs in the configuration space */
2431 	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2432 		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2433 			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
2434 		num_vfs_param = BNX2X_NR_VIRTFN(bp);
2435 	}
2436 
2437 	bp->requested_nr_virtfn = num_vfs_param;
2438 	if (num_vfs_param == 0) {
2439 		bnx2x_set_pf_tx_switching(bp, false);
2440 		bnx2x_disable_sriov(bp);
2441 		return 0;
2442 	} else {
2443 		return bnx2x_enable_sriov(bp);
2444 	}
2445 }
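
/* bnx2x_sriov_configure() is reached through the standard PCI sysfs
 * knob (the BDF below is hypothetical):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Writing 0 tears the VFs down again via bnx2x_disable_sriov().
 */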
2446 
2447 #define IGU_ENTRY_SIZE 4
2448 
2449 int bnx2x_enable_sriov(struct bnx2x *bp)
2450 {
2451 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
2452 	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2453 	u32 igu_entry, address;
2454 	u16 num_vf_queues;
2455 
2456 	if (req_vfs == 0)
2457 		return 0;
2458 
2459 	first_vf = bp->vfdb->sriov.first_vf_in_pf;
2460 
2461 	/* statically distribute vf sb pool between VFs */
2462 	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2463 			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2464 
2465 	/* zero previous values learned from igu cam */
2466 	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2467 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2468 
2469 		vf->sb_count = 0;
2470 		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2471 	}
2472 	bp->vfdb->vf_sbs_pool = 0;
2473 
2474 	/* prepare IGU cam */
2475 	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2476 	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2477 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2478 		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2479 			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2480 				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2481 				IGU_REG_MAPPING_MEMORY_VALID;
2482 			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2483 			   sb_idx, vf_idx);
2484 			REG_WR(bp, address, igu_entry);
2485 			sb_idx++;
2486 			address += IGU_ENTRY_SIZE;
2487 		}
2488 	}
2489 
2490 	/* Reinitialize vf database according to igu cam */
2491 	bnx2x_get_vf_igu_cam_info(bp);
2492 
2493 	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2494 	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2495 
2496 	qcount = 0;
2497 	for_each_vf(bp, vf_idx) {
2498 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2499 
2500 		/* set local queue arrays */
2501 		vf->vfqs = &bp->vfdb->vfqs[qcount];
2502 		qcount += vf_sb_count(vf);
2503 		bnx2x_iov_static_resc(bp, vf);
2504 	}
2505 
2506 	/* prepare msix vectors in VF configuration space - the value in the
2507 	 * PCI configuration space should be the index of the last entry,
2508 	 * namely one less than the actual size of the table
2509 	 */
2510 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2511 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2512 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2513 		       num_vf_queues - 1);
2514 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2515 		   vf_idx, num_vf_queues - 1);
2516 	}
2517 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2518 
2519 	/* enable sriov. This will probe all the VFs, and consequently cause
2520 	 * the "acquire" messages to appear on the VF PF channel.
2521 	 */
2522 	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2523 	bnx2x_disable_sriov(bp);
2524 
2525 	rc = bnx2x_set_pf_tx_switching(bp, true);
2526 	if (rc)
2527 		return rc;
2528 
2529 	rc = pci_enable_sriov(bp->pdev, req_vfs);
2530 	if (rc) {
2531 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2532 		return rc;
2533 	}
2534 	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2535 	return req_vfs;
2536 }
2537 
2538 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2539 {
2540 	int vfidx;
2541 	struct pf_vf_bulletin_content *bulletin;
2542 
2543 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2544 	for_each_vf(bp, vfidx) {
2545 		bulletin = BP_VF_BULLETIN(bp, vfidx);
2546 		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2547 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
2548 					  htons(ETH_P_8021Q));
2549 	}
2550 }
2551 
2552 void bnx2x_disable_sriov(struct bnx2x *bp)
2553 {
2554 	if (pci_vfs_assigned(bp->pdev)) {
2555 		DP(BNX2X_MSG_IOV,
2556 		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2557 		return;
2558 	}
2559 
2560 	pci_disable_sriov(bp->pdev);
2561 }
2562 
2563 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2564 			    struct bnx2x_virtf **vf,
2565 			    struct pf_vf_bulletin_content **bulletin,
2566 			    bool test_queue)
2567 {
2568 	if (bp->state != BNX2X_STATE_OPEN) {
2569 		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2570 		return -EINVAL;
2571 	}
2572 
2573 	if (!IS_SRIOV(bp)) {
2574 		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2575 		return -EINVAL;
2576 	}
2577 
2578 	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2579 		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2580 			  vfidx, BNX2X_NR_VIRTFN(bp));
2581 		return -EINVAL;
2582 	}
2583 
2584 	/* init members */
2585 	*vf = BP_VF(bp, vfidx);
2586 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
2587 
2588 	if (!*vf) {
2589 		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2590 		return -EINVAL;
2591 	}
2592 
2593 	if (test_queue && !(*vf)->vfqs) {
2594 		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2595 			  vfidx);
2596 		return -EINVAL;
2597 	}
2598 
2599 	if (!*bulletin) {
2600 		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2601 			  vfidx);
2602 		return -EINVAL;
2603 	}
2604 
2605 	return 0;
2606 }
2607 
2608 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2609 			struct ifla_vf_info *ivi)
2610 {
2611 	struct bnx2x *bp = netdev_priv(dev);
2612 	struct bnx2x_virtf *vf = NULL;
2613 	struct pf_vf_bulletin_content *bulletin = NULL;
2614 	struct bnx2x_vlan_mac_obj *mac_obj;
2615 	struct bnx2x_vlan_mac_obj *vlan_obj;
2616 	int rc;
2617 
2618 	/* sanity and init */
2619 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2620 	if (rc)
2621 		return rc;
2622 
2623 	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2624 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2625 	if (!mac_obj || !vlan_obj) {
2626 		BNX2X_ERR("VF partially initialized\n");
2627 		return -EINVAL;
2628 	}
2629 
2630 	ivi->vf = vfidx;
2631 	ivi->qos = 0;
2632 	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2633 	ivi->min_tx_rate = 0;
2634 	ivi->spoofchk = vf->spoofchk ? 1 : 0;
2635 	ivi->linkstate = vf->link_cfg;
2636 	if (vf->state == VF_ENABLED) {
2637 		/* mac and vlan are in vlan_mac objects */
2638 		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2639 			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2640 						0, ETH_ALEN);
2641 			vlan_obj->get_n_elements(bp, vlan_obj, 1,
2642 						 (u8 *)&ivi->vlan, 0,
2643 						 VLAN_HLEN);
2644 		}
2645 	} else {
2646 		mutex_lock(&bp->vfdb->bulletin_mutex);
2647 		/* mac */
2648 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2649 			/* mac configured by ndo so it's in the bulletin board */
2650 			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2651 		else
2652 			/* function has not been loaded yet. Show mac as 0s */
2653 			eth_zero_addr(ivi->mac);
2654 
2655 		/* vlan */
2656 		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2657 			/* vlan configured by ndo so it's in the bulletin board */
2658 			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2659 		else
2660 			/* function has not been loaded yet. Show vlans as 0s */
2661 			memset(&ivi->vlan, 0, VLAN_HLEN);
2662 
2663 		mutex_unlock(&bp->vfdb->bulletin_mutex);
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 /* New mac for VF. Consider these cases:
2670  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2671  *    supply at acquire.
2672  * 2. VF has already been acquired but has not yet initialized - store in local
2673  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2674  *    will configure this mac when it is ready.
2675  * 3. VF has already initialized but has not yet setup a queue - post the new
2676  *    mac on VF's bulletin board right now. VF will configure this mac when it
2677  *    is ready.
2678  * 4. VF has already set a queue - delete any macs already configured for this
2679  *    queue and manually config the new mac.
2680  * In any event, once this function has been called refuse any attempts by the
2681  * VF to configure any mac for itself except for this mac. In case of a race
2682  * where the VF fails to see the new post on its bulletin board before sending a
2683  * mac configuration request, the PF will simply fail the request and VF can try
2684  * again after consulting its bulletin board.
2685  */
2686 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2687 {
2688 	struct bnx2x *bp = netdev_priv(dev);
2689 	int rc, q_logical_state;
2690 	struct bnx2x_virtf *vf = NULL;
2691 	struct pf_vf_bulletin_content *bulletin = NULL;
2692 
2693 	if (!is_valid_ether_addr(mac)) {
2694 		BNX2X_ERR("mac address invalid\n");
2695 		return -EINVAL;
2696 	}
2697 
2698 	/* sanity and init */
2699 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2700 	if (rc)
2701 		return rc;
2702 
2703 	mutex_lock(&bp->vfdb->bulletin_mutex);
2704 
2705 	/* update PF's copy of the VF's bulletin. Will no longer accept mac
2706 	 * configuration requests from the vf unless they match this mac
2707 	 */
2708 	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2709 	memcpy(bulletin->mac, mac, ETH_ALEN);
2710 
2711 	/* Post update on VF's bulletin board */
2712 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
2713 
2714 	/* release lock before checking return code */
2715 	mutex_unlock(&bp->vfdb->bulletin_mutex);
2716 
2717 	if (rc) {
2718 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2719 		return rc;
2720 	}
2721 
2722 	q_logical_state =
2723 		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2724 	if (vf->state == VF_ENABLED &&
2725 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2726 		/* configure the mac in device on this vf's queue */
2727 		unsigned long ramrod_flags = 0;
2728 		struct bnx2x_vlan_mac_obj *mac_obj;
2729 
2730 		/* User should be able to see failure reason in system logs */
2731 		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2732 			return -EINVAL;
2733 
2734 		/* must lock vfpf channel to protect against vf flows */
2735 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2736 
2737 		/* remove existing eth macs */
2738 		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2739 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2740 		if (rc) {
2741 			BNX2X_ERR("failed to delete eth macs\n");
2742 			rc = -EINVAL;
2743 			goto out;
2744 		}
2745 
2746 		/* remove existing uc list macs */
2747 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2748 		if (rc) {
2749 			BNX2X_ERR("failed to delete uc_list macs\n");
2750 			rc = -EINVAL;
2751 			goto out;
2752 		}
2753 
2754 		/* configure the new mac to device */
2755 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2756 		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2757 				  BNX2X_ETH_MAC, &ramrod_flags);
2758 
2759 out:
2760 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2761 	}
2762 
2763 	return rc;
2764 }
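
/* This ndo is typically driven from userspace (device name and address
 * below are hypothetical):
 *
 *	ip link set dev eth0 vf 0 mac 02:11:22:33:44:55
 *
 * From then on the PF refuses any other mac the VF tries to configure
 * for itself, as described in the comment above.
 */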
2765 
2766 static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2767 					 struct bnx2x_virtf *vf, bool accept)
2768 {
2769 	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2770 	unsigned long accept_flags;
2771 
2772 	/* need to remove/add the VF's accept_any_vlan bit */
2773 	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2774 	if (accept)
2775 		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2776 	else
2777 		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2778 
2779 	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2780 			      accept_flags);
2781 	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2782 	bnx2x_config_rx_mode(bp, &rx_ramrod);
2783 }
2784 
2785 static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2786 				    u16 vlan, bool add)
2787 {
2788 	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2789 	unsigned long ramrod_flags = 0;
2790 	int rc = 0;
2791 
2792 	/* configure the new vlan to device */
2793 	memset(&ramrod_param, 0, sizeof(ramrod_param));
2794 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2795 	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2796 	ramrod_param.ramrod_flags = ramrod_flags;
2797 	ramrod_param.user_req.u.vlan.vlan = vlan;
2798 	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2799 					: BNX2X_VLAN_MAC_DEL;
2800 	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2801 	if (rc) {
2802 		BNX2X_ERR("failed to configure vlan\n");
2803 		return -EINVAL;
2804 	}
2805 
2806 	return 0;
2807 }
2808 
2809 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
2810 		      __be16 vlan_proto)
2811 {
2812 	struct pf_vf_bulletin_content *bulletin = NULL;
2813 	struct bnx2x *bp = netdev_priv(dev);
2814 	struct bnx2x_vlan_mac_obj *vlan_obj;
2815 	unsigned long vlan_mac_flags = 0;
2816 	unsigned long ramrod_flags = 0;
2817 	struct bnx2x_virtf *vf = NULL;
2818 	int i, rc;
2819 
2820 	if (vlan > 4095) {
2821 		BNX2X_ERR("illegal vlan value %d\n", vlan);
2822 		return -EINVAL;
2823 	}
2824 
2825 	if (vlan_proto != htons(ETH_P_8021Q))
2826 		return -EPROTONOSUPPORT;
2827 
2828 	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2829 	   vfidx, vlan, 0);
2830 
2831 	/* sanity and init */
2832 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2833 	if (rc)
2834 		return rc;
2835 
2836 	/* update PF's copy of the VF's bulletin. No point in posting the vlan
2837 	 * to the VF since it doesn't have anything to do with it. But it is
2838 	 * useful to store it here in case the VF is not up yet, so we can
2839 	 * configure the vlan later when it is. Treat vlan id 0 as removing
2840 	 * the Host tag.
2841 	 */
2842 	mutex_lock(&bp->vfdb->bulletin_mutex);
2843 
2844 	if (vlan > 0)
2845 		bulletin->valid_bitmap |= 1 << VLAN_VALID;
2846 	else
2847 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2848 	bulletin->vlan = vlan;
2849 
2850 	/* Post update on VF's bulletin board */
2851 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
2852 	if (rc)
2853 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2854 	mutex_unlock(&bp->vfdb->bulletin_mutex);
2855 
2856 	/* is vf initialized and queue set up? */
2857 	if (vf->state != VF_ENABLED ||
2858 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2859 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2860 		return rc;
2861 
2862 	/* User should be able to see error in system logs */
2863 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2864 		return -EINVAL;
2865 
2866 	/* must lock vfpf channel to protect against vf flows */
2867 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2868 
2869 	/* remove existing vlans */
2870 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2871 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2872 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2873 				  &ramrod_flags);
2874 	if (rc) {
2875 		BNX2X_ERR("failed to delete vlans\n");
2876 		rc = -EINVAL;
2877 		goto out;
2878 	}
2879 
2880 	/* clear accept_any_vlan when the HV forces a vlan; otherwise set
2881 	 * it according to VF capabilities
2882 	 */
2883 	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
2884 		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
2885 
2886 	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
2887 	if (rc)
2888 		goto out;
2889 
2890 	/* send queue update ramrods to configure default vlan and
2891 	 * silent vlan removal
2892 	 */
2893 	for_each_vfq(vf, i) {
2894 		struct bnx2x_queue_state_params q_params = {NULL};
2895 		struct bnx2x_queue_update_params *update_params;
2896 
2897 		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2898 
2899 		/* validate the Q is UP */
2900 		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2901 		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2902 			continue;
2903 
2904 		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2905 		q_params.cmd = BNX2X_Q_CMD_UPDATE;
2906 		update_params = &q_params.params.update;
2907 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2908 			  &update_params->update_flags);
2909 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2910 			  &update_params->update_flags);
2911 		if (vlan == 0) {
2912 			/* if vlan is 0 then we want to leave the VF traffic
2913 			 * untagged, and leave the incoming traffic untouched
2914 			 * (i.e. do not remove any vlan tags).
2915 			 */
2916 			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2917 				    &update_params->update_flags);
2918 			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2919 				    &update_params->update_flags);
2920 		} else {
2921 			/* configure default vlan to vf queue and set silent
2922 			 * vlan removal (the vf remains unaware of this vlan).
2923 			 */
2924 			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2925 				  &update_params->update_flags);
2926 			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2927 				  &update_params->update_flags);
2928 			update_params->def_vlan = vlan;
2929 			update_params->silent_removal_value =
2930 				vlan & VLAN_VID_MASK;
2931 			update_params->silent_removal_mask = VLAN_VID_MASK;
2932 		}
2933 
2934 		/* Update the Queue state */
2935 		rc = bnx2x_queue_state_change(bp, &q_params);
2936 		if (rc) {
2937 			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
2938 				  i);
2939 			goto out;
2940 		}
2941 	}
2942 out:
2943 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2944 
2945 	if (!rc)
2946 		DP(BNX2X_MSG_IOV,
2947 		   "updated VF[%d] vlan configuration (vlan = %d)\n",
2948 		   vfidx, vlan);
2949 
2950 	return rc;
2951 }
2952 
2953 int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
2954 {
2955 	struct bnx2x *bp = netdev_priv(dev);
2956 	struct bnx2x_virtf *vf;
2957 	int i, rc = 0;
2958 
2959 	vf = BP_VF(bp, idx);
2960 	if (!vf)
2961 		return -EINVAL;
2962 
2963 	/* nothing to do */
2964 	if (vf->spoofchk == val)
2965 		return 0;
2966 
2967 	vf->spoofchk = val ? 1 : 0;
2968 
2969 	DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
2970 	   val ? "enabling" : "disabling", idx);
2971 
2972 	/* is vf initialized and queue set up? */
2973 	if (vf->state != VF_ENABLED ||
2974 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2975 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2976 		return rc;
2977 
2978 	/* User should be able to see error in system logs */
2979 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2980 		return -EINVAL;
2981 
2982 	/* send queue update ramrods to configure spoofchk */
2983 	for_each_vfq(vf, i) {
2984 		struct bnx2x_queue_state_params q_params = {NULL};
2985 		struct bnx2x_queue_update_params *update_params;
2986 
2987 		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2988 
2989 		/* validate the Q is UP */
2990 		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2991 		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2992 			continue;
2993 
2994 		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2995 		q_params.cmd = BNX2X_Q_CMD_UPDATE;
2996 		update_params = &q_params.params.update;
2997 		__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
2998 			  &update_params->update_flags);
2999 		if (val) {
3000 			__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
3001 				  &update_params->update_flags);
3002 		} else {
3003 			__clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
3004 				    &update_params->update_flags);
3005 		}
3006 
3007 		/* Update the Queue state */
3008 		rc = bnx2x_queue_state_change(bp, &q_params);
3009 		if (rc) {
3010 			BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
3011 				  val ? "enable" : "disable", idx, i);
3012 			goto out;
3013 		}
3014 	}
3015 out:
3016 	if (!rc)
3017 		DP(BNX2X_MSG_IOV,
3018 		   "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
3019 		   idx);
3020 
3021 	return rc;
3022 }
3023 
3024 /* crc is the first field in the bulletin board. Compute the crc over the
3025  * entire bulletin board excluding the crc field itself. Use the length field
3026  * as the Bulletin Board was posted by a PF with possibly a different version
3027  * from the vf which will sample it. Therefore, the length is computed by the
3028  * PF and then used blindly by the VF.
3029  */
3030 u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
3031 {
3032 	return crc32(BULLETIN_CRC_SEED,
3033 		 ((u8 *)bulletin) + sizeof(bulletin->crc),
3034 		 bulletin->length - sizeof(bulletin->crc));
3035 }
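
/* A hedged sketch (compiled out): validating a sampled bulletin with
 * the helper above, mirroring what bnx2x_sample_bulletin() does below.
 */
#if 0
static bool bnx2x_bulletin_crc_ok(struct pf_vf_bulletin_content *bulletin)
{
	return bulletin->crc == bnx2x_crc_vf_bulletin(bulletin);
}
#endif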
3036 
3037 /* Check for new posts on the bulletin board */
3038 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3039 {
3040 	struct pf_vf_bulletin_content *bulletin;
3041 	int attempts;
3042 
3043 	/* sampling the structure mid-post may result in corrupted data;
3044 	 * validate the crc to ensure coherency.
3045 	 */
3046 	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3047 		u32 crc;
3048 
3049 		/* sample the bulletin board */
3050 		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
3051 		       sizeof(union pf_vf_bulletin));
3052 
3053 		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
3054 
3055 		if (bp->shadow_bulletin.content.crc == crc)
3056 			break;
3057 
3058 		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3059 			  bp->shadow_bulletin.content.crc, crc);
3060 	}
3061 
3062 	if (attempts >= BULLETIN_ATTEMPTS) {
3063 		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3064 			  attempts);
3065 		return PFVF_BULLETIN_CRC_ERR;
3066 	}
3067 	bulletin = &bp->shadow_bulletin.content;
3068 
3069 	/* bulletin board hasn't changed since last sample */
3070 	if (bp->old_bulletin.version == bulletin->version)
3071 		return PFVF_BULLETIN_UNCHANGED;
3072 
3073 	/* the mac address in bulletin board is valid and is new */
3074 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
3075 	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3076 		/* update new mac to net device */
3077 		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
3078 	}
3079 
3080 	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3081 		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3082 		   bulletin->link_speed, bulletin->link_flags);
3083 
3084 		bp->vf_link_vars.line_speed = bulletin->link_speed;
3085 		bp->vf_link_vars.link_report_flags = 0;
3086 		/* Link is down */
3087 		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3088 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3089 				  &bp->vf_link_vars.link_report_flags);
3090 		/* Full DUPLEX */
3091 		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3092 			__set_bit(BNX2X_LINK_REPORT_FD,
3093 				  &bp->vf_link_vars.link_report_flags);
3094 		/* Rx Flow Control is ON */
3095 		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3096 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3097 				  &bp->vf_link_vars.link_report_flags);
3098 		/* Tx Flow Control is ON */
3099 		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3100 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3101 				  &bp->vf_link_vars.link_report_flags);
3102 		__bnx2x_link_report(bp);
3103 	}
3104 
3105 	/* copy new bulletin board to bp */
3106 	memcpy(&bp->old_bulletin, bulletin,
3107 	       sizeof(struct pf_vf_bulletin_content));
3108 
3109 	return PFVF_BULLETIN_UPDATED;
3110 }
3111 
3112 void bnx2x_timer_sriov(struct bnx2x *bp)
3113 {
3114 	bnx2x_sample_bulletin(bp);
3115 
3116 	/* if the channel is down we need to self-destruct */
3117 	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3118 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3119 				       BNX2X_MSG_IOV);
3120 }
3121 
3122 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3123 {
3124 	/* vf doorbells are embedded within the regview */
3125 	return bp->regview + PXP_VF_ADDR_DB_START;
3126 }
3127 
3128 void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3129 {
3130 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3131 		       sizeof(struct bnx2x_vf_mbx_msg));
3132 	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3133 		       sizeof(union pf_vf_bulletin));
3134 }
3135 
3136 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3137 {
3138 	mutex_init(&bp->vf2pf_mutex);
3139 
3140 	/* allocate vf2pf mailbox for vf to pf channel */
3141 	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3142 					 sizeof(struct bnx2x_vf_mbx_msg));
3143 	if (!bp->vf2pf_mbox)
3144 		goto alloc_mem_err;
3145 
3146 	/* allocate pf 2 vf bulletin board */
3147 	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3148 					     sizeof(union pf_vf_bulletin));
3149 	if (!bp->pf2vf_bulletin)
3150 		goto alloc_mem_err;
3151 
3152 	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3153 
3154 	return 0;
3155 
3156 alloc_mem_err:
3157 	bnx2x_vf_pci_dealloc(bp);
3158 	return -ENOMEM;
3159 }
3160 
3161 void bnx2x_iov_channel_down(struct bnx2x *bp)
3162 {
3163 	int vf_idx;
3164 	struct pf_vf_bulletin_content *bulletin;
3165 
3166 	if (!IS_SRIOV(bp))
3167 		return;
3168 
3169 	for_each_vf(bp, vf_idx) {
3170 		/* locate this VFs bulletin board and update the channel down
3171 		 * bit
3172 		 */
3173 		bulletin = BP_VF_BULLETIN(bp, vf_idx);
3174 		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3175 
3176 		/* update vf bulletin board */
3177 		bnx2x_post_vf_bulletin(bp, vf_idx);
3178 	}
3179 }
3180 
3181 void bnx2x_iov_task(struct work_struct *work)
3182 {
3183 	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3184 
3185 	if (!netif_running(bp->dev))
3186 		return;
3187 
3188 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3189 			       &bp->iov_task_state))
3190 		bnx2x_vf_handle_flr_event(bp);
3191 
3192 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3193 			       &bp->iov_task_state))
3194 		bnx2x_vf_mbx(bp);
3195 }
3196 
3197 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3198 {
3199 	smp_mb__before_atomic();
3200 	set_bit(flag, &bp->iov_task_state);
3201 	smp_mb__after_atomic();
3202 	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3203 	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3204 }
3205