/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u8 num_cids;
	u8 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};

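/* Illustrative sketch only (not part of the VF<->PF ABI): requests are built
 * as a sequence of TLVs inside the mailbox buffer, each starting with a
 * struct channel_tlv header and followed immediately by the next one. A
 * minimal helper for appending a header and advancing a running offset could
 * look like the function below; the driver's own TLV helpers in qed_vf.c are
 * the authoritative implementation.
 */
static inline void *qed_vf_example_add_tlv(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* The next TLV starts right after this one */
	*offset += length;

	return tl;
}
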
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
	/* A requirement for supporting multiple Tx queues on a single
	 * queue-zone: the VF passes qids as additional information whenever
	 * passing queue references.
	 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS     BIT(2)

	/* The VF is using the physical bar. While this is mostly internal
	 * to the VF, it might affect the number of CIDs supported, assuming
	 * QUEUE_QIDS is set.
	 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR   BIT(3)
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

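/* Illustrative sketch only: fill the RSS TLV with a full indirection table
 * spread evenly across 'num_rxqs' queue indices. Whether the entries hold
 * local queue ids or values later translated by the PF is decided by the
 * driver code that builds the vport-update request, not by this header.
 */
static inline void
qed_vf_example_fill_rss_ind_table(struct vfpf_vport_update_rss_tlv *p_rss,
				  u8 num_rxqs)
{
	int i;

	if (!num_rxqs)
		return;

	/* 2 ^ 7 == 128 == T_ETH_INDIRECTION_TABLE_SIZE */
	p_rss->rss_table_size_log = 7;
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		p_rss->rss_ind_table[i] = i % num_rxqs;

	p_rss->update_rss_flags |= VFPF_UPDATE_RSS_IND_TABLE_FLAG;
}
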
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the
 * [version-based] sanity mechanism and allow a VF that can't be supported to
 * pass the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point, and new
 * VFs will fail to probe on older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

	/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS             BIT(3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		/* Doorbell bar size configured in HW: log(size) or 0 */
		u8 bar_size;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible the PF had to configure an older fastpath HSI
		 * [in case the VF is newer than the PF]. This is communicated
		 * back to the VF. It can also be used in case of an error due
		 * to non-matching versions, to give the VF insight into the
		 * failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

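/* Illustrative sketch only: the PF answers a queue-start request with a bar
 * offset, and the VF derives the producer/doorbell address by adding that
 * offset to its own mapping of the relevant bar ('bar_va' below is a
 * hypothetical, caller-provided mapping; the real derivation lives in
 * qed_vf.c).
 */
static inline void __iomem *
qed_vf_example_queue_addr(void __iomem *bar_va,
			  struct pfvf_start_queue_resp_tlv *p_resp)
{
	return (u8 __iomem *)bar_va + p_resp->offset;
}
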
/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	/* There are only 256 approximate-match bins, and in the HSI they're
	 * divided into 32-bit values. As old VFs used to set bits in those
	 * values on their side, the upper half of the array is never expected
	 * to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};

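/* Illustrative sketch only: how a caller could mark one of the 256
 * approximate-match bins in the 64-bit wide bins[] array above. Deriving the
 * bin index from a multicast MAC hash is outside the scope of this header.
 */
static inline void
qed_vf_example_set_mcast_bin(struct vfpf_vport_update_mcast_bin_tlv *p_mcast,
			     u16 bin)
{
	if (bin < 256)
		p_mcast->bins[bin / 64] |= 1ULL << (bin % 64);
}
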
struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

struct vfpf_read_coal_req_tlv {
	struct vfpf_first_tlv first_tlv;
	u16 qid;
	u8 is_rx;
	u8 padding[5];
};

struct pfvf_read_coal_resp_tlv {
	struct pfvf_tlv hdr;
	u16 coal;
	u8 padding[6];
};

struct vfpf_bulletin_update_mac_tlv {
	struct vfpf_first_tlv first_tlv;
	u8 mac[ETH_ALEN];
	u8 padding[2];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct vfpf_read_coal_req_tlv read_coal_req;
	struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
	struct pfvf_read_coal_resp_tlv read_coal_resp;
};

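/* Illustrative sketch only: both unions above embed struct tlv_buffer_size,
 * so a request/response always occupies the fixed-size mailbox. A
 * hypothetical compile-time check (assuming BUILD_BUG_ON() is reachable
 * through the includes above) could look like this.
 */
static inline void qed_vf_example_check_mailbox_size(void)
{
	BUILD_BUG_ON(sizeof(union vfpf_tlvs) != TLV_BUFFER_SIZE);
	BUILD_BUG_ON(sizeof(union pfvf_tlvs) != TLV_BUFFER_SIZE);
}
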
enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

struct qed_bulletin_content {
	/* CRC of the structure, to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
	 * structs may grow over time, we cannot embed them here; instead we
	 * duplicate all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

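/* Illustrative sketch only: consumers test valid_bitmap against the
 * enum qed_bulletin_bit values above before trusting a field, e.g. to see
 * whether the PF posted a forced MAC.
 */
static inline bool
qed_vf_example_bulletin_has_forced_mac(struct qed_bulletin_content *p_bulletin)
{
	return !!(p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED));
}
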
struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_COALESCE_READ,
	CHANNEL_TLV_BULLETIN_UPDATE_MAC,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break if vport-update tlvs are ever made non-sequential.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};

/* Default number of CIDs [total of both Rx and Tx] to be requested,
 * and the maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current day VFs are passing the SBs physical address on vport
	 * start, and as they lack an IGU mapping they need to store the
	 * addresses of previously registered SBs.
	 * Even if we were to change configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether VF utilizes doorbells via limited register
	 * bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};

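/* Illustrative sketch only: the request/reply buffers in struct qed_vf_iov
 * are shared by all callers, so composing a request and reading its reply is
 * expected to happen under the iov mutex; this hypothetical helper merely
 * shows the pairing, while the driver's own send/receive path in qed_vf.c
 * handles it for real.
 */
static inline void qed_vf_example_mailbox_access(struct qed_vf_iov *p_iov)
{
	mutex_lock(&p_iov->mutex);
	/* ... compose a request in p_iov->vf2pf_request, send it, and read
	 * the reply from p_iov->pf2vf_reply ...
	 */
	mutex_unlock(&p_iov->mutex);
}
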
/**
 * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
 *             A coalesce value of '0' will omit the configuration.
 *
 * @param p_hwfn
 * @param rx_coal - coalesce value in microseconds for the rx queue
 * @param tx_coal - coalesce value in microseconds for the tx queue
 * @param p_cid   - queue cid
 *
 **/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
			   u16 rx_coal,
			   u16 tx_coal, struct qed_queue_cid *p_cid);

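/* Illustrative usage only: request 64us Rx / 128us Tx coalescing for an
 * already-created queue cid ('p_cid' is assumed to come from the L2 queue
 * setup path); passing '0' for either value omits that direction, as
 * described above.
 */
static inline int qed_vf_example_set_coalesce(struct qed_hwfn *p_hwfn,
					      struct qed_queue_cid *p_cid)
{
	return qed_vf_pf_set_coalesce(p_hwfn, 64, 128, p_cid);
}
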
/**
 * @brief VF - Get coalesce per VF's relative queue.
 *
 * @param p_hwfn
 * @param p_coal - coalesce value in microseconds for VF queues.
 * @param p_cid  - queue cid
 *
 **/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid);

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get number of Tx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_txqs - allocated TX queues
 */
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);

/**
 * @brief Get number of available connections [both Rx and Tx] for VF
 *
 * @param p_hwfn
 * @param num_cids - allocated number of connections
 */
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire response TLV
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *      sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param p_cid			- Only relative fields are relevant
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param p_cid
 * @param pbl_addr              - physical address of the Tx pbl
 * @param pbl_size              - pbl size
 * @param pp_doorbell           - pointer to address to which to
 *                      write the doorbell
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 *
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memory
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief Stores [or removes] a configured sb_info.
 *
 * @param p_hwfn
 * @param sb_id - zero-based SB index [for fastpath]
 * @param p_sb - may be NULL [during removal].
 */
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_tunn);

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
/**
 * @brief - Ask PF to update the MAC address in its bulletin board
 *
 * @param p_mac - mac address to be updated in bulletin board
 */
int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);

#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}

static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
				      struct qed_sb_info *p_sb)
{
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}

static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}

static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
						u8 *p_mac)
{
	return -EINVAL;
}

static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
		   enum BAR_ID bar_id)
{
	return 0;
}
#endif

#endif