/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u8 num_cids;
	u8 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate the
 * response buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
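
/* Illustrative note (not part of the HSI definitions above): a vf->pf request
 * buffer is expected to hold a chain of TLVs, starting with a request-specific
 * TLV whose first member is a struct vfpf_first_tlv and terminated by a
 * CHANNEL_TLV_LIST_END entry, e.g.:
 *
 *	| vfpf_acquire_tlv | channel_list_end_tlv | ... padding ... |
 *
 * first_tlv.reply_address tells the PF where to place the corresponding
 * pfvf_tlvs reply, and each channel_tlv.length covers only its own TLV, so
 * the chain can be walked TLV by TLV.
 */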

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
	/* A requirement for supporting multi-Tx queues on a single queue-zone:
	 * the VF would pass qids as additional information whenever passing
	 * queue references.
	 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS     BIT(2)

	/* The VF is using the physical bar. While this is mostly internal
	 * to the VF, it might affect the number of CIDs supported, assuming
	 * QUEUE_QIDS is set.
	 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR   BIT(3)
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
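
/* Note (derived from the sizes above, not an extra HSI constraint): with
 * T_ETH_INDIRECTION_TABLE_SIZE fixed at 128 entries, the largest meaningful
 * rss_table_size_log is presumably 7 (2^7 = 128), and the 10-dword rss_key
 * is the usual 40-byte Toeplitz key.
 */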

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the
 * [version-based] sanity mechanism and allow a VF that can't be supported to
 * pass the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point, and new
 * VFs will fail to probe on older PFs that don't.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

	/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS             BIT(3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		/* Doorbell bar size configured in HW: log(size) or 0 */
		u8 bar_size;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible the PF had to configure an older fastpath HSI
		 * [in case the VF is newer than the PF]. This is communicated
		 * back to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in the VF about the
		 * failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};
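
/* Illustrative note on the acquire handshake (behaviour, not ABI): the VF
 * fills a vfpf_acquire_tlv with its capabilities and a vf_pf_resc_request,
 * and the PF answers with a pfvf_acquire_resp_tlv. A PFVF_STATUS_NO_RESOURCE
 * in hdr.status is typically the cue for the VF to retry the acquire with
 * the (smaller) resources the PF advertised in 'resc'.
 */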

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};
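
/* Illustrative note: when VFPF_ACQUIRE_CAP_QUEUE_QIDS has been negotiated,
 * a queue-related request (e.g. CHANNEL_TLV_START_RXQ) would presumably be
 * followed in the same buffer by a CHANNEL_TLV_QID TLV carrying the qid
 * within the queue-zone, before the terminating CHANNEL_TLV_LIST_END.
 */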

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	/* There are only 256 approximation bins, and in the HSI they're
	 * divided into 32-bit values. As old VFs used to set bits in these
	 * values on their side, the upper half of the array is never
	 * expected to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv used as a header for the various extended tlvs carrying the
 * different functionalities of the vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

struct vfpf_read_coal_req_tlv {
	struct vfpf_first_tlv first_tlv;
	u16 qid;
	u8 is_rx;
	u8 padding[5];
};

struct pfvf_read_coal_resp_tlv {
	struct pfvf_tlv hdr;
	u16 coal;
	u8 padding[6];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct vfpf_read_coal_req_tlv read_coal_req;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
	struct pfvf_read_coal_resp_tlv read_coal_resp;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

struct qed_bulletin_content {
	/* crc of the structure, to make sure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible these structs will grow further down the road, we cannot
	 * embed them here; instead we need to carry all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};
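
/* Illustrative sketch (assumed behaviour, see qed_vf_read_bulletin()): the PF
 * periodically DMAs a qed_bulletin_content into the VF's bulletin buffer, so
 * the VF copies it into a shadow, checks the CRC to make sure it wasn't read
 * mid-update, and only then acts on the bits set in valid_bitmap, e.g.:
 *
 *	crc = crc32(0, (u8 *)&shadow + sizeof(shadow.crc),
 *		    sizeof(shadow) - sizeof(shadow.crc));
 *	if (crc == shadow.crc && shadow.version != last_version)
 *		consume shadow.mac, shadow.pvid, link fields, ...
 */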

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_COALESCE_READ,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case of non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
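
/* Illustrative sketch: the vport-update extended TLVs occupy a contiguous
 * range of the enum above, so code can presumably walk them as
 *
 *	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++)
 *		look for 'tlv' in the request buffer;
 *
 * which is exactly why the comment above warns against making them
 * non-sequential.
 */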

/* Number of CIDs [total of both Rx and Tx] to be requested by default,
 * and the maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* We set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case the PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current-day VFs pass the SBs' physical addresses on vport start,
	 * and as they lack an IGU mapping they need to store the addresses
	 * of previously registered SBs.
	 * Even if we were to change the configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether the VF utilizes doorbells via the limited
	 * register bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};
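
/* Illustrative note on the mailbox flow (a sketch, not the exact driver
 * code): under 'mutex', the VF builds its request TLV chain in vf2pf_request
 * (using 'offset' as the running write pointer), points
 * first_tlv.reply_address at pf2vf_reply_phys, signals the PF, and then
 * polls hdr.status in pf2vf_reply until it leaves PFVF_STATUS_WAITING.
 */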

/**
 * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
 *             Coalesce value '0' will omit the configuration.
 *
 * @param p_hwfn
 * @param rx_coal - coalesce value in microseconds for the rx queue
 * @param tx_coal - coalesce value in microseconds for the tx queue
 * @param p_cid   - queue cid
 *
 **/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
			   u16 rx_coal,
			   u16 tx_coal, struct qed_queue_cid *p_cid);

/**
 * @brief VF - Get coalesce per VF's relative queue.
 *
 * @param p_hwfn
 * @param p_coal - coalesce value in microseconds for VF queues.
 * @param p_cid  - queue cid
 *
 **/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid);

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get number of Tx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_txqs - allocated TX queues
 */
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);

/**
 * @brief Get number of available connections [both Rx and Tx] for VF
 *
 * @param p_hwfn
 * @param num_cids - allocated number of connections
 */
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire
 *        response TLV
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF - sends the ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
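
/* Illustrative bring-up order (a sketch of how these calls are typically
 * sequenced by the VF side of the driver, not a contract):
 * qed_vf_hw_prepare() performs the ACQUIRE handshake, after which the VF can
 * start a vport (qed_vf_pf_vport_start()), start its Rx/Tx queues
 * (qed_vf_pf_rxq_start()/qed_vf_pf_txq_start()) and configure filters,
 * tearing everything down via the corresponding stop/release calls.
 */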

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param p_cid			- Only relative fields are relevant
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param p_cid                 - only relative fields are relevant
 * @param pbl_addr              - physical address of the Tx PBL
 * @param pbl_size              - PBL size
 * @param pp_doorbell           - pointer to the address to which the
 *                                doorbell should be written
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 * @brief VF - send a close message to the PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief Stores [or removes] a configured sb_info.
 *
 * @param p_hwfn
 * @param sb_id - zero-based SB index [for fastpath]
 * @param p_sb - may be NULL [during removal].
 */
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_tunn);

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}

static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
				      struct qed_sb_info *p_sb)
{
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}

static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}

static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}

static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
		   enum BAR_ID bar_id)
{
	return 0;
}
#endif

#endif