1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef __MLX5_CORE_H__
34 #define __MLX5_CORE_H__
35
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/if_link.h>
40 #include <linux/firmware.h>
41 #include <linux/mlx5/cq.h>
42 #include <linux/mlx5/fs.h>
43 #include <linux/mlx5/driver.h>
44 #include "lib/devcom.h"
45
46 extern uint mlx5_core_debug_mask;
47
/* Logging helpers: thin wrappers around the dev_* printk family that prefix
 * every message with "<func>:<line>:(pid <pid>): " so log lines can be
 * correlated with the exact call site and task context.
 * Note: mlx5_core_info() is the one exception and emits no such prefix.
 */
#define mlx5_core_dbg(__dev, format, ...) \
	dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
		 __func__, __LINE__, current->pid, \
		 ##__VA_ARGS__)

#define mlx5_core_dbg_once(__dev, format, ...)		\
	dev_dbg_once((__dev)->device,		\
		     "%s:%d:(pid %d): " format,		\
		     __func__, __LINE__, current->pid,	\
		     ##__VA_ARGS__)

/* Debug message gated on a category bit being set in mlx5_core_debug_mask. */
#define mlx5_core_dbg_mask(__dev, mask, format, ...)		\
do {								\
	if ((mask) & mlx5_core_debug_mask)			\
		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);	\
} while (0)

#define mlx5_core_err(__dev, format, ...) \
	dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
		__func__, __LINE__, current->pid, \
	       ##__VA_ARGS__)

#define mlx5_core_err_rl(__dev, format, ...)		\
	dev_err_ratelimited((__dev)->device,		\
			    "%s:%d:(pid %d): " format,	\
			    __func__, __LINE__, current->pid,	\
			    ##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
		 __func__, __LINE__, current->pid, \
		##__VA_ARGS__)

#define mlx5_core_warn_once(__dev, format, ...)				\
	dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format,	\
		      __func__, __LINE__, current->pid,			\
		      ##__VA_ARGS__)

#define mlx5_core_warn_rl(__dev, format, ...)		\
	dev_warn_ratelimited((__dev)->device,		\
			     "%s:%d:(pid %d): " format,	\
			     __func__, __LINE__, current->pid,	\
			     ##__VA_ARGS__)

/* No func/line/pid prefix here, unlike the other helpers above. */
#define mlx5_core_info(__dev, format, ...)		\
	dev_info((__dev)->device, format, ##__VA_ARGS__)

#define mlx5_core_info_rl(__dev, format, ...)		\
	dev_info_ratelimited((__dev)->device,		\
			     "%s:%d:(pid %d): " format,	\
			     __func__, __LINE__, current->pid,	\
			     ##__VA_ARGS__)
100
101 #define ACCESS_KEY_LEN 32
102 #define FT_ID_FT_TYPE_OFFSET 24
103
/* Parameters for mlx5_cmd_allow_other_vhca_access(): grant another function
 * (VHCA) access to a local object identified by obj_type/obj_id.
 */
struct mlx5_cmd_allow_other_vhca_access_attr {
	u16 obj_type;
	u32 obj_id;
	/* shared secret the accessing side presents; see the alias-create attr */
	u8 access_key[ACCESS_KEY_LEN];
};
109
/* Parameters for mlx5_cmd_alias_obj_create(): create a local alias for an
 * object (obj_type/obj_id) owned by another function (vhca_id). access_key
 * presumably must match the key registered via the allow-access command —
 * confirm against the firmware interface spec.
 */
struct mlx5_cmd_alias_obj_create_attr {
	u32 obj_id;
	u16 vhca_id;
	u16 obj_type;
	u8 access_key[ACCESS_KEY_LEN];
};
116
/* Ethernet protocol masks as returned by mlx5_port_query_eth_proto(). */
struct mlx5_port_eth_proto {
	u32 cap;   /* device capability mask */
	u32 admin; /* administratively enabled mask */
	u32 oper;  /* currently operational mask */
};
122
/* Query window for mlx5_query_module_eeprom_by_page(): which module to read
 * and where (bank/page/offset), plus how many bytes.
 */
struct mlx5_module_eeprom_query_params {
	u16 size;          /* number of bytes to read */
	u16 offset;        /* byte offset within the page */
	u16 i2c_address;   /* presumably the module's I2C slave address — confirm */
	u32 page;
	u32 bank;
	u32 module_number;
};
131
/* Log a message for @dev at a runtime-selected @level, prefixed with the
 * driver and device name. Out-of-range levels are clamped to the kernel
 * default (warning once when that happens).
 */
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
	struct device *device = dev->device;
	struct va_format vaf;
	va_list args;

	if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
		      "Level %d is out of range, set to default level\n", level))
		level = LOGLEVEL_DEFAULT;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	/* %pV expands the nested va_format in a single printk emission */
	dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
			&vaf);
	va_end(args);
}
150
/* Like the mlx5_core_* helpers, but with the log level chosen at runtime
 * (routed through mlx5_printk()).
 */
#define mlx5_log(__dev, level, format, ...)			\
	mlx5_printk(__dev, level, "%s:%d:(pid %d): " format,	\
		    __func__, __LINE__, current->pid,		\
		    ##__VA_ARGS__)
155
mlx5_core_dma_dev(struct mlx5_core_dev * dev)156 static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
157 {
158 return &dev->pdev->dev;
159 }
160
/* Command-interface debug categories, tested against mlx5_core_debug_mask. */
enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};
165
/* Status/syndrome values generated by the driver itself (not firmware),
 * e.g. when a command is aborted locally.
 */
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};
170
/* Addresses within the device's semaphore address space. */
enum mlx5_semaphore_space_address {
	MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
	MLX5_SEMAPHORE_SW_RESET = 0x20,
};
175
176 #define MLX5_DEFAULT_PROF 2
177 #define MLX5_SF_PROF 3
178 #define MLX5_NUM_FW_CMD_THREADS 8
179 #define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS
180
/* Compute a command input length of the form "fixed + item_size * num_items"
 * with overflow checking at every step. Returns the total as a non-negative
 * int on success, or -ENOMEM if any operand or intermediate result does not
 * fit in an int. @func/@line identify the real caller (supplied by the
 * MLX5_FLEXIBLE_INLEN() wrapper) so the error logs point at it.
 */
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
				      size_t item_size, size_t num_items,
				      const char *func, int line)
{
	int inlen;

	/* Reject operands too large for int before the casts below. */
	if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
		mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_add_overflow((int)fixed, inlen, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	return inlen;
}
207
208 #define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
209 mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
210
211 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
212 int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
213 enum mlx5_cap_mode cap_mode);
214 int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
215 int mlx5_query_board_id(struct mlx5_core_dev *dev);
216 int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
217 int mlx5_cmd_init(struct mlx5_core_dev *dev);
218 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
219 int mlx5_cmd_enable(struct mlx5_core_dev *dev);
220 void mlx5_cmd_disable(struct mlx5_core_dev *dev);
221 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
222 enum mlx5_cmdif_state cmdif_state);
223 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
224 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
225 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
226 int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
227 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
228 void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
229 u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
230 int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
231 void mlx5_disable_device(struct mlx5_core_dev *dev);
232 int mlx5_recover_device(struct mlx5_core_dev *dev);
233 int mlx5_sriov_init(struct mlx5_core_dev *dev);
234 void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
235 int mlx5_sriov_attach(struct mlx5_core_dev *dev);
236 void mlx5_sriov_detach(struct mlx5_core_dev *dev);
237 int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
238 void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
239 int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
240 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
241 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
242 int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
243 void *context, u32 *element_id);
244 int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
245 void *context, u32 element_id,
246 u32 modify_bitmask);
247 int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
248 u32 element_id);
249 int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
250
251 void mlx5_cmd_flush(struct mlx5_core_dev *dev);
252 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
253 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
254
255 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
256 u8 access_reg_group);
257 int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
258 u8 access_reg_group);
259 int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
260 u8 feature_group, u8 access_reg_group);
261 int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
262
263 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
264 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
265 void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
266 void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
267 void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
268 void mlx5_lag_enable_change(struct mlx5_core_dev *dev);
269
270 int mlx5_events_init(struct mlx5_core_dev *dev);
271 void mlx5_events_cleanup(struct mlx5_core_dev *dev);
272 void mlx5_events_start(struct mlx5_core_dev *dev);
273 void mlx5_events_stop(struct mlx5_core_dev *dev);
274
275 int mlx5_adev_idx_alloc(void);
276 void mlx5_adev_idx_free(int idx);
277 void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
278 int mlx5_adev_init(struct mlx5_core_dev *dev);
279
280 int mlx5_attach_device(struct mlx5_core_dev *dev);
281 void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
282 int mlx5_register_device(struct mlx5_core_dev *dev);
283 void mlx5_unregister_device(struct mlx5_core_dev *dev);
284 void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
285 bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
286
287 void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
288 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
289 int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
290 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
291 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
292
293 struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
294 void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
295
296 void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
297 int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
298 enum mlx5_port_status status);
299 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
300 enum mlx5_port_status *status);
301 int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
302
303 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
304 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
305 int mlx5_query_port_pause(struct mlx5_core_dev *dev,
306 u32 *rx_pause, u32 *tx_pause);
307
308 int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
309 int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
310 u8 *pfc_en_rx);
311
312 int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
313 u16 stall_critical_watermark,
314 u16 stall_minor_watermark);
315 int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
316 u16 *stall_critical_watermark,
317 u16 *stall_minor_watermark);
318
319 int mlx5_max_tc(struct mlx5_core_dev *mdev);
320 int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
321 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
322 u8 prio, u8 *tc);
323 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
324 int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
325 u8 tc, u8 *tc_group);
326 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
327 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
328 u8 tc, u8 *bw_pct);
329 int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
330 u8 *max_bw_value,
331 u8 *max_bw_unit);
332 int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
333 u8 *max_bw_value,
334 u8 *max_bw_unit);
335 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
336 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
337
338 int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
339 int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
340 int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
341 void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
342 bool *enabled);
343 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
344 u16 offset, u16 size, u8 *data);
345 int
346 mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
347 struct mlx5_module_eeprom_query_params *params,
348 u8 *data);
349
350 int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
351 int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
352 int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
353 int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
354 int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
355 u8 *buffer_ownership);
356 int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
357 int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
358
359 int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
360 struct mlx5_port_eth_proto *eproto);
361 bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
362 u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
363 bool force_legacy);
364 u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
365 bool force_legacy);
366 int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
367
368 #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
369 MLX5_CAP_GEN((mdev), pps_modify) && \
370 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
371 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
372
373 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
374 struct netlink_ext_ack *extack);
375 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
376 u32 *running_ver, u32 *stored_ver);
377
#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
#else
/* Ethernet support compiled out: no-op stubs so callers need no ifdefs. */
static inline int mlx5e_init(void){ return 0; }
static inline void mlx5e_cleanup(void){}
#endif
385
mlx5_sriov_is_enabled(struct mlx5_core_dev * dev)386 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
387 {
388 return pci_num_vf(dev->pdev) ? true : false;
389 }
390
391 int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
/* Re-run auxiliary driver matching for @dev while holding the hca devcom
 * completion lock. Callers that already hold that lock must use
 * mlx5_rescan_drivers_locked() directly.
 */
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
	int ret;

	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	ret = mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	return ret;
}
401
402 u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
403 void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
404
/* True when this core device is a Sub-Function (SF) rather than a PF/VF. */
static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_SF;
}
409
/* Map an SF core device back to its containing auxiliary device. Only valid
 * when mlx5_core_is_sf(mdev) is true: for SFs, mdev->device is embedded in
 * a struct auxiliary_device.
 */
static inline struct auxiliary_device *
mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
{
	return container_of(mdev->device, struct auxiliary_device, dev);
}
415
416 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
417 void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
418 int mlx5_init_one(struct mlx5_core_dev *dev);
419 int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
420 void mlx5_uninit_one(struct mlx5_core_dev *dev);
421 void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
422 void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
423 int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
424 int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
425 int mlx5_init_one_light(struct mlx5_core_dev *dev);
426 void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
427 void mlx5_unload_one_light(struct mlx5_core_dev *dev);
428
429 int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
430 u16 opmod);
431 #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
432 mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
433
mlx5_sriov_get_vf_total_msix(struct pci_dev * pdev)434 static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
435 {
436 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
437
438 return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
439 }
440
441 bool mlx5_eth_supported(struct mlx5_core_dev *dev);
442 bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
443 bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
444 bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
445 int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
446 struct mlx5_cmd_allow_other_vhca_access_attr *attr);
447 int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
448 struct mlx5_cmd_alias_obj_create_attr *alias_attr,
449 u32 *obj_id);
450 int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
451
/* First vport number assigned to embedded-CPU (EC) VFs; 0 when the device
 * does not expose the capability.
 */
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
}
456
/* Non-zero when this is the embedded-CPU PF and the device exposes an EC VF
 * vport base, i.e. EC SR-IOV is usable. Boolean in nature despite the u16
 * return type (kept for ABI stability with existing callers).
 */
static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
}
461
mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev * dev,u16 vport_num)462 static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
463 {
464 int base_vport = mlx5_core_ec_vf_vport_base(dev);
465 int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);
466
467 if (!mlx5_core_ec_sriov_enabled(dev))
468 return false;
469
470 return (vport_num >= base_vport && vport_num < max_vport);
471 }
472
/* Convert a vport number to a function id. EC VF vports are numbered
 * relative to their base and offset by one; all other vports map 1:1.
 */
static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
	if (ec_vf_func)
		return vport - mlx5_core_ec_vf_vport_base(dev) + 1;

	return vport;
}
478
mlx5_max_eq_cap_get(const struct mlx5_core_dev * dev)479 static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
480 {
481 if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
482 return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
483
484 if (MLX5_CAP_GEN(dev, max_num_eqs))
485 return MLX5_CAP_GEN(dev, max_num_eqs);
486
487 return 1 << MLX5_CAP_GEN(dev, log_max_eq);
488 }
489 #endif /* __MLX5_CORE_H__ */
490