1 /*
2  * This file is auto-generated. Modifications will be lost.
3  *
4  * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
5  * for more information.
6  */
7 #ifndef MLX5_ABI_USER_H
8 #define MLX5_ABI_USER_H
9 #include <linux/types.h>
10 #include <linux/if_ether.h>
11 #include <rdma/ib_user_ioctl_verbs.h>
12 #include <rdma/mlx5_user_ioctl_verbs.h>
/*
 * QP creation flag bits — each constant is a distinct bit so values can
 * be OR-ed together (presumably carried in struct mlx5_ib_create_qp.flags;
 * confirm against the mlx5 driver).
 */
enum {
  MLX5_QP_FLAG_SIGNATURE = 1 << 0,
  MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
  MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
  MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
  MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
  MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
  MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
  MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
  MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
  MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
};
/* SRQ creation flag bits (presumably struct mlx5_ib_create_srq.flags). */
enum {
  MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
/* WQ creation flag bits (presumably struct mlx5_ib_create_wq.flags). */
enum {
  MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
/* Userspace<->kernel ABI version of the mlx5 uverbs interface. */
#define MLX5_IB_UVERBS_ABI_VERSION 1
/* v1 request payload for allocating a user context. */
struct mlx5_ib_alloc_ucontext_req {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
};
/*
 * Userspace library capability bits; 64-bit values (note the __u64 cast)
 * matching the width of the lib_caps field below.
 */
enum mlx5_lib_caps {
  MLX5_LIB_CAP_4K_UAR = (__u64) 1 << 0,
  MLX5_LIB_CAP_DYN_UAR = (__u64) 1 << 1,
};
/* Flag bits for mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
  MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
};
/* v2 user-context allocation request; starts with the same two fields as v1. */
struct mlx5_ib_alloc_ucontext_req_v2 {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
  __u32 flags;
  __u32 comp_mask;
  __u8 max_cqe_version;
  /* reserved0..reserved2 pad lib_caps to an 8-byte offset (24 bytes total). */
  __u8 reserved0;
  __u16 reserved1;
  __u32 reserved2;
  __aligned_u64 lib_caps;
};
/* Bits for mlx5_ib_alloc_ucontext_resp.comp_mask: which optional response
 * fields/features the kernel filled in or supports. */
enum mlx5_ib_alloc_ucontext_resp_mask {
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG = 1UL << 5,
};
/* Bits for mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw: commands that accept
 * user hardware-specific (UHW) data. */
enum mlx5_user_cmds_supp_uhw {
  MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
  MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
/* Inline-mode values (0 = NA); used by the eth_min_inline response field. */
enum mlx5_user_inline_mode {
  MLX5_USER_INLINE_MODE_NA,
  MLX5_USER_INLINE_MODE_NONE,
  MLX5_USER_INLINE_MODE_L2,
  MLX5_USER_INLINE_MODE_IP,
  MLX5_USER_INLINE_MODE_TCP_UDP,
};
/* ESP/AES-GCM flow-action capability bits (flow_action_flags response field). */
enum {
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
/*
 * User-context allocation response: device limits and parameters returned
 * by the kernel to userspace.
 */
struct mlx5_ib_alloc_ucontext_resp {
  __u32 qp_tab_size;
  __u32 bf_reg_size;
  __u32 tot_bfregs;
  __u32 cache_line_size;
  __u16 max_sq_desc_sz;
  __u16 max_rq_desc_sz;
  __u32 max_send_wqebb;
  __u32 max_recv_wr;
  __u32 max_srq_recv_wr;
  __u16 num_ports;
  __u16 flow_action_flags; /* see the FLOW_ACTION_FLAGS enum above */
  __u32 comp_mask; /* see enum mlx5_ib_alloc_ucontext_resp_mask */
  __u32 response_length;
  __u8 cqe_version;
  __u8 cmds_supp_uhw; /* see enum mlx5_user_cmds_supp_uhw */
  __u8 eth_min_inline; /* see enum mlx5_user_inline_mode */
  __u8 clock_info_versions;
  __aligned_u64 hca_core_clock_offset;
  __u32 log_uar_size;
  __u32 num_uars_per_page;
  __u32 num_dyn_bfregs;
  __u32 dump_fill_mkey;
};
/* PD allocation response: the protection-domain number. */
struct mlx5_ib_alloc_pd_resp {
  __u32 pdn;
};
/* TSO capabilities reported by query-device. */
struct mlx5_ib_tso_caps {
  __u32 max_tso;
  __u32 supported_qpts;
};
/* RSS capabilities reported by query-device. */
struct mlx5_ib_rss_caps {
  __aligned_u64 rx_hash_fields_mask;
  __u8 rx_hash_function;
  __u8 reserved[7]; /* pads the struct to 16 bytes */
};
/* Supported mini-CQE result formats for CQE compression (bitmask values). */
enum mlx5_ib_cqe_comp_res_format {
  MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
  MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
  MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
/* CQE compression capabilities reported by query-device. */
struct mlx5_ib_cqe_comp_caps {
  __u32 max_num;
  __u32 supported_format; /* bits from enum mlx5_ib_cqe_comp_res_format */
};
/* Bits for mlx5_packet_pacing_caps.cap_flags. */
enum mlx5_ib_packet_pacing_cap_flags {
  MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
};
/* Packet-pacing (rate-limit) capabilities reported by query-device. */
struct mlx5_packet_pacing_caps {
  __u32 qp_rate_limit_min;
  __u32 qp_rate_limit_max;
  __u32 supported_qpts;
  __u8 cap_flags; /* bits from enum mlx5_ib_packet_pacing_cap_flags */
  __u8 reserved[3]; /* pads the struct to a 4-byte multiple */
};
/* Multi-packet WQE capability bits (bit 0 is reserved). */
enum mlx5_ib_mpw_caps {
  MPW_RESERVED = 1 << 0,
  MLX5_IB_ALLOW_MPW = 1 << 1,
  MLX5_IB_SUPPORT_EMPW = 1 << 2,
};
/* Software-parsing offload bits. */
enum mlx5_ib_sw_parsing_offloads {
  MLX5_IB_SW_PARSING = 1 << 0,
  MLX5_IB_SW_PARSING_CSUM = 1 << 1,
  MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
/* Software-parsing capabilities reported by query-device. */
struct mlx5_ib_sw_parsing_caps {
  __u32 sw_parsing_offloads; /* bits from enum mlx5_ib_sw_parsing_offloads */
  __u32 supported_qpts;
};
/* Striding-RQ capabilities reported by query-device (log2 bounds). */
struct mlx5_ib_striding_rq_caps {
  __u32 min_single_stride_log_num_of_bytes;
  __u32 max_single_stride_log_num_of_bytes;
  __u32 min_single_wqe_log_num_of_strides;
  __u32 max_single_wqe_log_num_of_strides;
  __u32 supported_qpts;
  __u32 reserved;
};
/* DCI-stream limits (log2 counts); spelling "concurent" is fixed ABI. */
struct mlx5_ib_dci_streams_caps {
  __u8 max_log_num_concurent;
  __u8 max_log_num_errored;
};
/* Bits for mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
  MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
/* Tunnel offload bits for mlx5_ib_query_device_resp.tunnel_offloads_caps. */
enum mlx5_ib_tunnel_offloads {
  MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
  MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
  MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
/*
 * Extended (UHW) query-device response aggregating the capability structs
 * above. struct mlx5_ib_uapi_reg comes from <rdma/mlx5_user_ioctl_verbs.h>.
 */
struct mlx5_ib_query_device_resp {
  __u32 comp_mask;
  __u32 response_length;
  struct mlx5_ib_tso_caps tso_caps;
  struct mlx5_ib_rss_caps rss_caps;
  struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
  struct mlx5_packet_pacing_caps packet_pacing_caps;
  __u32 mlx5_ib_support_multi_pkt_send_wqes;
  __u32 flags; /* bits from enum mlx5_ib_query_dev_resp_flags */
  struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
  struct mlx5_ib_striding_rq_caps striding_rq_caps;
  __u32 tunnel_offloads_caps; /* bits from enum mlx5_ib_tunnel_offloads */
  struct mlx5_ib_dci_streams_caps dci_streams_caps;
  __u16 reserved;
  struct mlx5_ib_uapi_reg reg_c0;
};
/* Bits for mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
  MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
  MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
  MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2,
};
/* CQ creation request: userspace-provided buffer/doorbell and CQE options. */
struct mlx5_ib_create_cq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 cqe_size;
  __u8 cqe_comp_en;
  __u8 cqe_comp_res_format; /* value from enum mlx5_ib_cqe_comp_res_format */
  __u16 flags; /* bits from enum mlx5_ib_create_cq_flags */
  __u16 uar_page_index;
  __u16 reserved0;
  __u32 reserved1;
};
/* CQ creation response: the CQ number. */
struct mlx5_ib_create_cq_resp {
  __u32 cqn;
  __u32 reserved;
};
/* CQ resize request: new buffer address and CQE size. */
struct mlx5_ib_resize_cq {
  __aligned_u64 buf_addr;
  __u16 cqe_size;
  __u16 reserved0;
  __u32 reserved1;
};
/* SRQ creation request. */
struct mlx5_ib_create_srq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 flags; /* bits from the MLX5_SRQ_FLAG_* enum above */
  __u32 reserved0;
  __u32 uidx;
  __u32 reserved1;
};
/* SRQ creation response: the SRQ number. */
struct mlx5_ib_create_srq_resp {
  __u32 srqn;
  __u32 reserved;
};
/* Requested DCI stream counts (log2); spelling "concurent" is fixed ABI. */
struct mlx5_ib_create_qp_dci_streams {
  __u8 log_num_concurent;
  __u8 log_num_errored;
};
/* QP creation request. */
struct mlx5_ib_create_qp {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 sq_wqe_count;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift;
  __u32 flags; /* bits from the MLX5_QP_FLAG_* enum above */
  __u32 uidx;
  __u32 bfreg_index;
  /* NOTE(review): which member applies presumably depends on the QP type
   * selected via flags (e.g. DCT) — confirm against the driver. */
  union {
    __aligned_u64 sq_buf_addr;
    __aligned_u64 access_key;
  };
  __u32 ece_options;
  struct mlx5_ib_create_qp_dci_streams dci_streams;
  __u16 reserved;
};
/* Supported RX hash functions (bitmask). */
enum mlx5_rx_hash_function_flags {
  MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
/* RX hash field selectors for rx_hash_fields_mask. INNER uses 1UL to keep
 * the bit-31 shift out of signed-int territory. */
enum mlx5_rx_hash_fields {
  MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
  MLX5_RX_HASH_DST_IPV4 = 1 << 1,
  MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
  MLX5_RX_HASH_DST_IPV6 = 1 << 3,
  MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
  MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
  MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
  MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
  MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
  MLX5_RX_HASH_INNER = (1UL << 31),
};
/* RSS QP creation request: hash configuration and key. */
struct mlx5_ib_create_qp_rss {
  __aligned_u64 rx_hash_fields_mask; /* bits from enum mlx5_rx_hash_fields */
  __u8 rx_hash_function; /* bits from enum mlx5_rx_hash_function_flags */
  __u8 rx_key_len;
  __u8 reserved[6];
  __u8 rx_hash_key[128];
  __u32 comp_mask;
  __u32 flags;
};
/* Bits for mlx5_ib_create_qp_resp.comp_mask: which response fields are valid. */
enum mlx5_ib_create_qp_resp_mask {
  MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
  MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
  MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
  MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
  MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4,
};
/* QP creation response. */
struct mlx5_ib_create_qp_resp {
  __u32 bfreg_index;
  __u32 ece_options;
  __u32 comp_mask; /* see enum mlx5_ib_create_qp_resp_mask */
  __u32 tirn;
  __u32 tisn;
  __u32 rqn;
  __u32 sqn;
  __u32 reserved1; /* pads tir_icm_addr to an 8-byte offset */
  __u64 tir_icm_addr;
};
/* Memory-window allocation request. */
struct mlx5_ib_alloc_mw {
  __u32 comp_mask;
  __u8 num_klms;
  __u8 reserved1;
  __u16 reserved2;
};
/* Bits for mlx5_ib_create_wq.comp_mask. */
enum mlx5_ib_create_wq_mask {
  MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
};
/* WQ creation request. */
struct mlx5_ib_create_wq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift;
  __u32 user_index;
  __u32 flags; /* bits from the MLX5_WQ_FLAG_* enum above */
  __u32 comp_mask; /* see enum mlx5_ib_create_wq_mask */
  __u32 single_stride_log_num_of_bytes;
  __u32 single_wqe_log_num_of_strides;
  __u32 two_byte_shift_en;
};
/* AH creation response; ETH_ALEN comes from <linux/if_ether.h>. */
struct mlx5_ib_create_ah_resp {
  __u32 response_length;
  __u8 dmac[ETH_ALEN];
  __u8 reserved[6];
};
/* Burst parameters for packet pacing (used by modify-QP). */
struct mlx5_ib_burst_info {
  __u32 max_burst_sz;
  __u16 typical_pkt_sz;
  __u16 reserved;
};
/* Modify-QP request extension. */
struct mlx5_ib_modify_qp {
  __u32 comp_mask;
  struct mlx5_ib_burst_info burst_info;
  __u32 ece_options;
};
/* Modify-QP response. */
struct mlx5_ib_modify_qp_resp {
  __u32 response_length;
  __u32 dctn;
  __u32 ece_options;
  __u32 reserved;
};
/* WQ creation response. */
struct mlx5_ib_create_wq_resp {
  __u32 response_length;
  __u32 reserved;
};
/* RWQ indirection-table creation response. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
  __u32 response_length;
  __u32 reserved;
};
/* Modify-WQ request. */
struct mlx5_ib_modify_wq {
  __u32 comp_mask;
  __u32 reserved;
};
/*
 * Clock-info page layout shared with userspace (mapped via
 * MLX5_IB_MMAP_CLOCK_INFO below).
 */
struct mlx5_ib_clock_info {
  __u32 sign;
  __u32 resv;
  __aligned_u64 nsec;
  __aligned_u64 cycles;
  __aligned_u64 frac;
  __u32 mult;
  __u32 shift;
  __aligned_u64 mask;
  __aligned_u64 overflow_period;
};
/* mmap offset commands; note 4 is unassigned and values are not bit flags. */
enum mlx5_ib_mmap_cmd {
  MLX5_IB_MMAP_REGULAR_PAGE = 0,
  MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
  MLX5_IB_MMAP_WC_PAGE = 2,
  MLX5_IB_MMAP_NC_PAGE = 3,
  MLX5_IB_MMAP_CORE_CLOCK = 5,
  MLX5_IB_MMAP_ALLOC_WC = 6,
  MLX5_IB_MMAP_CLOCK_INFO = 7,
  MLX5_IB_MMAP_DEVICE_MEM = 8,
};
/* Flag in mlx5_ib_clock_info.sign — presumably set while the kernel updates
 * the page; confirm reader protocol against the driver. */
enum {
  MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
/* Clock-info page layout version. */
enum {
  MLX5_IB_CLOCK_INFO_V1 = 0,
};
/* One flow-counter binding: description tag and counter index. */
struct mlx5_ib_flow_counters_desc {
  __u32 description;
  __u32 index;
};
/* Array of flow-counter descriptors passed from userspace.
 * RDMA_UAPI_PTR comes from <rdma/ib_user_ioctl_verbs.h>. */
struct mlx5_ib_flow_counters_data {
  RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
  __u32 ncounters;
  __u32 reserved;
};
/* Create-flow request: ncounters_data entries in the flexible array. */
struct mlx5_ib_create_flow {
  __u32 ncounters_data;
  __u32 reserved;
  struct mlx5_ib_flow_counters_data data[];
};
388 #endif
389