/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};
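
/* These enum values are typically used as bit indices into a vport-update
 * mask, one bit per update TLV to build. A minimal sketch, assuming a local
 * mask variable (the name here is illustrative, not the driver's code):
 *
 *	unsigned long update_flags = 0;
 *
 *	__set_bit(QED_IOV_VP_UPDATE_ACTIVATE, &update_flags);
 *	__set_bit(QED_IOV_VP_UPDATE_RSS, &update_flags);
 *	if (test_bit(QED_IOV_VP_UPDATE_RSS, &update_flags))
 *		...build and append the RSS update TLV...
 */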

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently we don't support a
	 * different number of Rx and Tx queues.
	 */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that the values should be relative to the PF's resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
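
/* A minimal sketch of filling these parameters before initializing a VF.
 * All values below are hypothetical and only illustrate the contract
 * described above (PF-relative qzone indices, equal Rx/Tx queue counts):
 *
 *	struct qed_iov_vf_init_params params;
 *	int i;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.rel_vf_id = 0;
 *	params.num_queues = 4;
 *	for (i = 0; i < params.num_queues; i++) {
 *		params.req_rx_queue[i] = 16 + i;   (PF-relative qzones)
 *		params.req_tx_queue[i] = 16 + i;
 *	}
 */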

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of VFs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device ID */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};
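
/* These fields mirror the PCIe SR-IOV extended capability. A hedged sketch
 * of how such fields are typically read, using the standard PCI accessors
 * and register offsets from <uapi/linux/pci_regs.h> (the local variable
 * names are illustrative, not the driver's actual code):
 *
 *	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
 *
 *	if (pos) {
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
 *				     &iov->total_vfs);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET,
 *				     &iov->offset);
 *		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE,
 *				     &iov->stride);
 *	}
 */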

/* This mailbox is maintained per VF in its PF, and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
				     (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
				     (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
					  (1 << VLAN_ADDR_FORCED))
};
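
/* Since configured_features reuses the bulletin valid-map bit indices,
 * checking whether a feature was already applied is a plain bit test.
 * An illustrative sketch (not the driver's actual code):
 *
 *	if (p_vf->configured_features & BIT(MAC_ADDR_FORCED))
 *		...the PF has already pushed a forced MAC to this VF...
 */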

/* This structure is part of qed_hwfn and is used only for PFs that have the
 * SR-IOV capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Mailbox message buffers are allocated contiguously and then split
	 * among the VFs.
	 */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
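
/* Each flag marks pending work for the IOV workqueue: a producer sets the
 * bit and schedules the work item, and the worker consumes it. A hedged
 * sketch assuming a per-hwfn flags word and work item (the field names
 * below are illustrative, not necessarily the driver's):
 *
 *	set_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags);
 *	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
 *
 * and in the worker:
 *
 *	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
 *			       &hwfn->iov_task_flags))
 *		...post updated bulletin boards to the VFs...
 */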

extern const struct qed_iov_hv_ops qed_iov_ops_pass;

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Check if the given VF ID @rel_vf_id is valid
 * w.r.t. the @b_enabled_only value:
 * if b_enabled_only = true - only an enabled VF ID is valid,
 * else any VF ID less than max_vfs is valid.
 *
 * @param p_hwfn
 * @param rel_vf_id - Relative VF ID
 * @param b_enabled_only - consider only enabled VFs
 * @param b_non_malicious - true iff we want to validate the VF isn't malicious
 *
 * @return bool - true for a valid VF ID
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);

/**
 * @brief - Given a VF index, return the index of the next active VF
 *          (including the given index).
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * @brief Read SR-IOV related information and allocate resources;
 *        reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
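
/* A minimal sketch of building a request chain with qed_add_tlv(): each
 * call appends one TLV at *offset and advances *offset past it, and the
 * chain is conventionally terminated with a list-end TLV. The buffer and
 * TLV type names below are illustrative assumptions:
 *
 *	u8 *offset = (u8 *)p_iov->vf2pf_request;
 *	struct vfpf_first_tlv *req;
 *
 *	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE,
 *			  sizeof(*req));
 *	...fill *req...
 *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */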

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * @brief qed_iov_alloc - allocate SR-IOV related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup SR-IOV related resources
 *
 * @param p_hwfn
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_free - free SR-IOV related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free SR-IOV related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief Mark the structs of VFs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on the path that were FLRed
 *
 * @return true iff one of the PF's VFs got FLRed, false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in a request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to the tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to the tlv of the given type if found, otherwise NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)				\
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0);	\
	     _i < MAX_NUM_VFS;					\
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
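
/* Typical usage - iterate only over the currently active VFs. A minimal
 * sketch (the vfs_array access is illustrative; callers usually go through
 * a lookup helper rather than indexing the array directly):
 *
 *	u16 i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *		...per-VF work...
 *	}
 */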

#endif