• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc.  It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ***   To edit the content of this header, modify the corresponding
 ***   source file (e.g. under external/kernel-headers/original/) then
 ***   run bionic/libc/kernel/tools/update_all.py
 ***
 ***   Any manual change here will be lost the next time this script will
 ***   be run. You've been warned!
 ***
 ****************************************************************************
 ****************************************************************************/
19 #ifndef MLX5_ABI_USER_H
20 #define MLX5_ABI_USER_H
21 #include <linux/types.h>
22 #include <linux/if_ether.h>
23 #include <rdma/ib_user_ioctl_verbs.h>
/*
 * QP creation flags — bitmask, presumably carried in
 * struct mlx5_ib_create_qp.flags (see below).  One bit per feature.
 */
enum {
  MLX5_QP_FLAG_SIGNATURE = 1 << 0,
  MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
  MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
  MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
  MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
  MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
  MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
  MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
  MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
  MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
};
/* SRQ creation flags — bitmask, see struct mlx5_ib_create_srq.flags. */
enum {
  MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
/* WQ creation flags — bitmask. */
enum {
  MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
/* Userspace/kernel ABI version for the mlx5 uverbs provider. */
#define MLX5_IB_UVERBS_ABI_VERSION 1
/* v1 "allocate user context" request (userspace -> kernel). */
struct mlx5_ib_alloc_ucontext_req {
  __u32 total_num_bfregs;       /* total blue-flame registers requested */
  __u32 num_low_latency_bfregs; /* subset reserved for low latency */
};
/*
 * Library capability bits; 64-bit mask (note the (__u64) cast keeps the
 * enum values 64-bit clean), reported via
 * struct mlx5_ib_alloc_ucontext_req_v2.lib_caps.
 */
enum mlx5_lib_caps {
  MLX5_LIB_CAP_4K_UAR = (__u64) 1 << 0,
  MLX5_LIB_CAP_DYN_UAR = (__u64) 1 << 1,
};
/* Flag bits for struct mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
  MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
};
/*
 * v2 "allocate user context" request.  Extends the v1 request with flags,
 * a comp_mask extension mechanism and a 64-bit library-capabilities mask.
 * reserved* fields pad the layout; __aligned_u64 keeps lib_caps 8-byte
 * aligned on every ABI.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
  __u32 flags;     /* mlx5_ib_alloc_uctx_v2_flags bits */
  __u32 comp_mask; /* which optional fields are valid */
  __u8 max_cqe_version;
  __u8 reserved0;
  __u16 reserved1;
  __u32 reserved2;
  __aligned_u64 lib_caps; /* mlx5_lib_caps bits */
};
/* Bits for struct mlx5_ib_alloc_ucontext_resp.comp_mask. */
enum mlx5_ib_alloc_ucontext_resp_mask {
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4,
};
/* Which uverbs commands support the "user hardware" extension channel. */
enum mlx5_user_cmds_supp_uhw {
  MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
  MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
/*
 * Ethernet minimum-inline modes (sequential values, not a bitmask).
 * NA distinguishes "not applicable/unknown" from an explicit NONE.
 */
enum mlx5_user_inline_mode {
  MLX5_USER_INLINE_MODE_NA,
  MLX5_USER_INLINE_MODE_NONE,
  MLX5_USER_INLINE_MODE_L2,
  MLX5_USER_INLINE_MODE_IP,
  MLX5_USER_INLINE_MODE_TCP_UDP,
};
/* Flow-action (ESP AES-GCM) capability bits, see
 * struct mlx5_ib_alloc_ucontext_resp.flow_action_flags. */
enum {
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
/*
 * "Allocate user context" response (kernel -> userspace): device limits,
 * versioning, and optional fields gated by comp_mask
 * (enum mlx5_ib_alloc_ucontext_resp_mask).
 */
struct mlx5_ib_alloc_ucontext_resp {
  __u32 qp_tab_size;
  __u32 bf_reg_size;
  __u32 tot_bfregs;
  __u32 cache_line_size;
  __u16 max_sq_desc_sz;
  __u16 max_rq_desc_sz;
  __u32 max_send_wqebb;
  __u32 max_recv_wr;
  __u32 max_srq_recv_wr;
  __u16 num_ports;
  __u16 flow_action_flags; /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_* */
  __u32 comp_mask;         /* mlx5_ib_alloc_ucontext_resp_mask bits */
  __u32 response_length;   /* bytes of this response actually filled in */
  __u8 cqe_version;
  __u8 cmds_supp_uhw;      /* mlx5_user_cmds_supp_uhw bits */
  __u8 eth_min_inline;     /* mlx5_user_inline_mode value */
  __u8 clock_info_versions;
  __aligned_u64 hca_core_clock_offset;
  __u32 log_uar_size;
  __u32 num_uars_per_page;
  __u32 num_dyn_bfregs;
  __u32 dump_fill_mkey;
};
/* "Allocate protection domain" response: the kernel-assigned PD number. */
struct mlx5_ib_alloc_pd_resp {
  __u32 pdn;
};
/* TSO capabilities; supported_qpts is a QP-type bitmask. */
struct mlx5_ib_tso_caps {
  __u32 max_tso;
  __u32 supported_qpts;
};
/* RSS capabilities; rx_hash_fields_mask uses mlx5_rx_hash_fields bits
 * and rx_hash_function uses mlx5_rx_hash_function_flags (defined below). */
struct mlx5_ib_rss_caps {
  __aligned_u64 rx_hash_fields_mask;
  __u8 rx_hash_function;
  __u8 reserved[7];
};
/* CQE-compression residual formats — bitmask of supported formats. */
enum mlx5_ib_cqe_comp_res_format {
  MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
  MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
  MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
/* CQE-compression capabilities reported by query_device. */
struct mlx5_ib_cqe_comp_caps {
  __u32 max_num;
  __u32 supported_format; /* mlx5_ib_cqe_comp_res_format bits */
};
/* Packet-pacing capability flag bits. */
enum mlx5_ib_packet_pacing_cap_flags {
  MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
};
/* Packet-pacing (rate-limit) capabilities. */
struct mlx5_packet_pacing_caps {
  __u32 qp_rate_limit_min;
  __u32 qp_rate_limit_max;
  __u32 supported_qpts;   /* QP-type bitmask */
  __u8 cap_flags;         /* mlx5_ib_packet_pacing_cap_flags bits */
  __u8 reserved[3];
};
/* Multi-packet WQE capability bits (bit 0 reserved). */
enum mlx5_ib_mpw_caps {
  MPW_RESERVED = 1 << 0,
  MLX5_IB_ALLOW_MPW = 1 << 1,
  MLX5_IB_SUPPORT_EMPW = 1 << 2,
};
/* Software-parsing offload bits. */
enum mlx5_ib_sw_parsing_offloads {
  MLX5_IB_SW_PARSING = 1 << 0,
  MLX5_IB_SW_PARSING_CSUM = 1 << 1,
  MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
/* Software-parsing capabilities reported by query_device. */
struct mlx5_ib_sw_parsing_caps {
  __u32 sw_parsing_offloads; /* mlx5_ib_sw_parsing_offloads bits */
  __u32 supported_qpts;      /* QP-type bitmask */
};
/* Striding-RQ capability limits (log2 quantities). */
struct mlx5_ib_striding_rq_caps {
  __u32 min_single_stride_log_num_of_bytes;
  __u32 max_single_stride_log_num_of_bytes;
  __u32 min_single_wqe_log_num_of_strides;
  __u32 max_single_wqe_log_num_of_strides;
  __u32 supported_qpts;
  __u32 reserved;
};
/* DCI-stream limits (log2 quantities; "concurent" spelling is ABI). */
struct mlx5_ib_dci_streams_caps {
  __u8 max_log_num_concurent;
  __u8 max_log_num_errored;
};
/* Bits for struct mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
  MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
/* Tunnel-offload capability bits, see
 * struct mlx5_ib_query_device_resp.tunnel_offloads_caps. */
enum mlx5_ib_tunnel_offloads {
  MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
  MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
  MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
186 struct mlx5_ib_query_device_resp {
187   __u32 comp_mask;
188   __u32 response_length;
189   struct mlx5_ib_tso_caps tso_caps;
190   struct mlx5_ib_rss_caps rss_caps;
191   struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
192   struct mlx5_packet_pacing_caps packet_pacing_caps;
193   __u32 mlx5_ib_support_multi_pkt_send_wqes;
194   __u32 flags;
195   struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
196   struct mlx5_ib_striding_rq_caps striding_rq_caps;
197   __u32 tunnel_offloads_caps;
198   struct mlx5_ib_dci_streams_caps dci_streams_caps;
199   __u16 reserved;
200 };
/* Bits for struct mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
  MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
  MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
  MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2,
};
/* "Create CQ" request: userspace-provided buffer/doorbell addresses. */
struct mlx5_ib_create_cq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 cqe_size;
  __u8 cqe_comp_en;
  __u8 cqe_comp_res_format; /* mlx5_ib_cqe_comp_res_format value */
  __u16 flags;              /* mlx5_ib_create_cq_flags bits */
  __u16 uar_page_index;     /* valid when FLAGS_UAR_PAGE_INDEX is set */
  __u16 reserved0;
  __u32 reserved1;
};
/* "Create CQ" response: the kernel-assigned CQ number. */
struct mlx5_ib_create_cq_resp {
  __u32 cqn;
  __u32 reserved;
};
/* "Resize CQ" request. */
struct mlx5_ib_resize_cq {
  __aligned_u64 buf_addr;
  __u16 cqe_size;
  __u16 reserved0;
  __u32 reserved1;
};
/* "Create SRQ" request. */
struct mlx5_ib_create_srq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 flags;     /* MLX5_SRQ_FLAG_* bits */
  __u32 reserved0;
  __u32 uidx;      /* user index */
  __u32 reserved1;
};
/* "Create SRQ" response: the kernel-assigned SRQ number. */
struct mlx5_ib_create_srq_resp {
  __u32 srqn;
  __u32 reserved;
};
/* Requested DCI stream counts (log2; "concurent" spelling is ABI). */
struct mlx5_ib_create_qp_dci_streams {
  __u8 log_num_concurent;
  __u8 log_num_errored;
};
243 struct mlx5_ib_create_qp {
244   __aligned_u64 buf_addr;
245   __aligned_u64 db_addr;
246   __u32 sq_wqe_count;
247   __u32 rq_wqe_count;
248   __u32 rq_wqe_shift;
249   __u32 flags;
250   __u32 uidx;
251   __u32 bfreg_index;
252   union {
253     __aligned_u64 sq_buf_addr;
254     __aligned_u64 access_key;
255   };
256   __u32 ece_options;
257   struct mlx5_ib_create_qp_dci_streams dci_streams;
258   __u16 reserved;
259 };
260 enum mlx5_rx_hash_function_flags {
261   MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
262 };
263 enum mlx5_rx_hash_fields {
264   MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
265   MLX5_RX_HASH_DST_IPV4 = 1 << 1,
266   MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
267   MLX5_RX_HASH_DST_IPV6 = 1 << 3,
268   MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
269   MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
270   MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
271   MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
272   MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
273   MLX5_RX_HASH_INNER = (1UL << 31),
274 };
275 struct mlx5_ib_create_qp_rss {
276   __aligned_u64 rx_hash_fields_mask;
277   __u8 rx_hash_function;
278   __u8 rx_key_len;
279   __u8 reserved[6];
280   __u8 rx_hash_key[128];
281   __u32 comp_mask;
282   __u32 flags;
283 };
284 enum mlx5_ib_create_qp_resp_mask {
285   MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
286   MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
287   MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
288   MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
289   MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4,
290 };
291 struct mlx5_ib_create_qp_resp {
292   __u32 bfreg_index;
293   __u32 ece_options;
294   __u32 comp_mask;
295   __u32 tirn;
296   __u32 tisn;
297   __u32 rqn;
298   __u32 sqn;
299   __u32 reserved1;
300   __u64 tir_icm_addr;
301 };
/* "Allocate memory window" request. */
struct mlx5_ib_alloc_mw {
  __u32 comp_mask;
  __u8 num_klms;
  __u8 reserved1;
  __u16 reserved2;
};
/* Bits for struct mlx5_ib_create_wq.comp_mask. */
enum mlx5_ib_create_wq_mask {
  MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
};
/* "Create work queue" request. */
struct mlx5_ib_create_wq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift;
  __u32 user_index;
  __u32 flags;     /* MLX5_WQ_FLAG_* bits */
  __u32 comp_mask; /* mlx5_ib_create_wq_mask bits */
  __u32 single_stride_log_num_of_bytes; /* striding-RQ parameters */
  __u32 single_wqe_log_num_of_strides;
  __u32 two_byte_shift_en;
};
/* "Create address handle" response: resolved destination MAC. */
struct mlx5_ib_create_ah_resp {
  __u32 response_length;
  __u8 dmac[ETH_ALEN];
  __u8 reserved[6];
};
/* Burst parameters for packet pacing, embedded in modify_qp. */
struct mlx5_ib_burst_info {
  __u32 max_burst_sz;
  __u16 typical_pkt_sz;
  __u16 reserved;
};
/* "Modify QP" request extension. */
struct mlx5_ib_modify_qp {
  __u32 comp_mask;
  struct mlx5_ib_burst_info burst_info;
  __u32 ece_options;
};
/* "Modify QP" response. */
struct mlx5_ib_modify_qp_resp {
  __u32 response_length;
  __u32 dctn; /* DCT number */
  __u32 ece_options;
  __u32 reserved;
};
/* "Create work queue" response. */
struct mlx5_ib_create_wq_resp {
  __u32 response_length;
  __u32 reserved;
};
/* "Create RWQ indirection table" response. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
  __u32 response_length;
  __u32 reserved;
};
/* "Modify work queue" request. */
struct mlx5_ib_modify_wq {
  __u32 comp_mask;
  __u32 reserved;
};
356 struct mlx5_ib_clock_info {
357   __u32 sign;
358   __u32 resv;
359   __aligned_u64 nsec;
360   __aligned_u64 cycles;
361   __aligned_u64 frac;
362   __u32 mult;
363   __u32 shift;
364   __aligned_u64 mask;
365   __aligned_u64 overflow_period;
366 };
367 enum mlx5_ib_mmap_cmd {
368   MLX5_IB_MMAP_REGULAR_PAGE = 0,
369   MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
370   MLX5_IB_MMAP_WC_PAGE = 2,
371   MLX5_IB_MMAP_NC_PAGE = 3,
372   MLX5_IB_MMAP_CORE_CLOCK = 5,
373   MLX5_IB_MMAP_ALLOC_WC = 6,
374   MLX5_IB_MMAP_CLOCK_INFO = 7,
375   MLX5_IB_MMAP_DEVICE_MEM = 8,
376 };
377 enum {
378   MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
379 };
380 enum {
381   MLX5_IB_CLOCK_INFO_V1 = 0,
382 };
383 struct mlx5_ib_flow_counters_desc {
384   __u32 description;
385   __u32 index;
386 };
387 struct mlx5_ib_flow_counters_data {
388   RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
389   __u32 ncounters;
390   __u32 reserved;
391 };
392 struct mlx5_ib_create_flow {
393   __u32 ncounters_data;
394   __u32 reserved;
395   struct mlx5_ib_flow_counters_data data[];
396 };
397 #endif
398