/*
 * This header was generated from the Linux kernel headers by update_headers.py,
 * to provide necessary information from the kernel to userspace, such as
 * constants, structures, and macros, and thus contains no copyrightable
 * information.
 */
#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H
#include <linux/types.h>
#include <linux/if_ether.h>	/* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>
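/*
 * Flags for the 'flags' field of struct mlx5_ib_create_qp below; for
 * example, MLX5_QP_FLAG_TYPE_DCT and MLX5_QP_FLAG_TYPE_DCI select the
 * DC transport QP sub-types.
 */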
enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
};
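/* Flags for the 'flags' field of struct mlx5_ib_create_srq below. */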
enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};
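/* Flags for the 'flags' field of struct mlx5_ib_create_wq below. */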
enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};
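/*
 * Increment this value if any change in this file is backward
 * incompatible, so that the uverbs ABI version changes with it.
 */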
#define MLX5_IB_UVERBS_ABI_VERSION	1
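/*
 * All structs in this file must stay laid out so that they pack the same
 * way on 32-bit and 64-bit architectures (to avoid incompatibility between
 * 32-bit userspace and 64-bit kernels). In particular, do not use pointer
 * types: pass pointers in a __u64 (or via RDMA_UAPI_PTR) instead.
 */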
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
};
enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
};
enum mlx5_ib_alloc_uctx_v2_flags {
	MLX5_IB_ALLOC_UCTX_DEVX	= 1 << 0,
};
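/*
 * In the v2 request below, 'flags' carries enum mlx5_ib_alloc_uctx_v2_flags
 * bits and 'lib_caps' carries the enum mlx5_lib_caps bits that the userspace
 * library supports.
 */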
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;
	__u32	comp_mask;
	__u8	max_cqe_version;
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY    = 1UL << 1,
};
enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,
	MLX5_USER_INLINE_MODE_NONE,
	MLX5_USER_INLINE_MODE_L2,
	MLX5_USER_INLINE_MODE_IP,
	MLX5_USER_INLINE_MODE_TCP_UDP,
};
enum {
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
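/*
 * In the response below, 'comp_mask' holds
 * enum mlx5_ib_alloc_ucontext_resp_mask bits, 'cmds_supp_uhw' holds
 * enum mlx5_user_cmds_supp_uhw bits, 'eth_min_inline' holds an
 * enum mlx5_user_inline_mode value, and 'flow_action_flags' holds the
 * MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_* bits above.
 * 'response_length' reports how many bytes of the response the kernel
 * actually filled in.
 */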
struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;
	__u32	tot_bfregs;
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;
	__u16	max_rq_desc_sz;
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	flow_action_flags;
	__u32	comp_mask;
	__u32	response_length;
	__u8	cqe_version;
	__u8	cmds_supp_uhw;
	__u8	eth_min_inline;
	__u8	clock_info_versions;
	__aligned_u64 hca_core_clock_offset;
	__u32	log_uar_size;
	__u32	num_uars_per_page;
	__u32	num_dyn_bfregs;
	__u32	dump_fill_mkey;
};
struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;
};
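/*
 * 'max_tso' is the maximum TSO payload size in bytes. Here, as in the
 * other capability structs below, 'supported_qpts' has one bit set per
 * supported QP type from enum ib_qp_type, e.g. 1 << IB_QPT_RAW_PACKET.
 */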
struct mlx5_ib_tso_caps {
	__u32 max_tso;

	__u32 supported_qpts;
};
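/*
 * 'rx_hash_fields_mask' holds enum mlx5_rx_hash_fields bits and
 * 'rx_hash_function' holds an enum mlx5_rx_hash_function_flags value
 * (both defined later in this file).
 */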
struct mlx5_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask;
	__u8 rx_hash_function;
	__u8 reserved[7];
};
enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
struct mlx5_ib_cqe_comp_caps {
	__u32 max_num;
	__u32 supported_format;
};
enum mlx5_ib_packet_pacing_cap_flags {
	MLX5_IB_PP_SUPPORT_BURST	= 1 << 0,
};
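/*
 * The QP rate limits below are in kbps; 'cap_flags' holds
 * enum mlx5_ib_packet_pacing_cap_flags bits.
 */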
struct mlx5_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max;

	__u32 supported_qpts;
	__u8  cap_flags;
	__u8  reserved[3];
};
enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
};
enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING = 1 << 0,
	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
	MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
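/* 'sw_parsing_offloads' holds enum mlx5_ib_sw_parsing_offloads bits. */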
struct mlx5_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads;

	__u32 supported_qpts;
};
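/*
 * Bounds for striding (multi-packet) RQs; stride sizes and the number of
 * strides per WQE are expressed as log2 values.
 */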
struct mlx5_ib_striding_rq_caps {
	__u32 min_single_stride_log_num_of_bytes;
	__u32 max_single_stride_log_num_of_bytes;
	__u32 min_single_wqe_log_num_of_strides;
	__u32 max_single_wqe_log_num_of_strides;

	__u32 supported_qpts;
	__u32 reserved;
};
enum mlx5_ib_query_dev_resp_flags {
	/* Support 128B CQE compression */
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
};
enum mlx5_ib_tunnel_offloads {
	MLX5_IB_TUNNELED_OFFLOADS_VXLAN  = 1 << 0,
	MLX5_IB_TUNNELED_OFFLOADS_GRE    = 1 << 1,
	MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
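/*
 * Response to the device-specific part of query_device: 'flags' holds
 * enum mlx5_ib_query_dev_resp_flags bits and 'tunnel_offloads_caps' holds
 * enum mlx5_ib_tunnel_offloads bits.
 */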
struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;
	__u32	flags;
	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
	struct mlx5_ib_striding_rq_caps striding_rq_caps;
	__u32	tunnel_offloads_caps;
	__u32	reserved;
};
enum mlx5_ib_create_cq_flags {
	MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD	= 1 << 0,
};
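/*
 * 'cqe_comp_en' enables CQE compression; 'cqe_comp_res_format' holds an
 * enum mlx5_ib_cqe_comp_res_format value and 'flags' holds
 * enum mlx5_ib_create_cq_flags bits.
 */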
struct mlx5_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	cqe_size;
	__u8    cqe_comp_en;
	__u8    cqe_comp_res_format;
	__u16	flags;
};
struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};
struct mlx5_ib_resize_cq {
	__aligned_u64 buf_addr;
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};
struct mlx5_ib_create_srq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	flags;
	__u32	reserved0;
	__u32	uidx;
	__u32	reserved1;
};
struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};
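/*
 * In the union at the end of struct mlx5_ib_create_qp, 'sq_buf_addr' is
 * used when the send queue has a separate buffer (raw packet QPs), while
 * 'access_key' carries the DC access key when MLX5_QP_FLAG_TYPE_DCT is set.
 */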
struct mlx5_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;
	__u32	uidx;
	__u32	bfreg_index;
	union {
		__aligned_u64 sq_buf_addr;
		__aligned_u64 access_key;
	};
};
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX5_RX_HASH_DST_IPV4	= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX5_RX_HASH_DST_IPV6	= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX5_RX_HASH_IPSEC_SPI		= 1 << 8,
	/* Bits 9..30 are saved for future fields */
	MLX5_RX_HASH_INNER		= (1UL << 31),
};
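/*
 * RSS QP creation request: 'rx_hash_function' holds an
 * enum mlx5_rx_hash_function_flags value, 'rx_hash_fields_mask' holds
 * enum mlx5_rx_hash_fields bits, and 'rx_key_len' gives the number of
 * valid bytes in 'rx_hash_key'.
 */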
struct mlx5_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask;
	__u8 rx_hash_function;
	__u8 rx_key_len;
	__u8 reserved[6];
	__u8 rx_hash_key[128];
	__u32   comp_mask;
	__u32	flags;
};
struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
	__u32   reserved;
};
struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};
enum mlx5_ib_create_wq_mask {
	MLX5_IB_CREATE_WQ_STRIDING_RQ	= (1 << 0),
};
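/*
 * MLX5_IB_CREATE_WQ_STRIDING_RQ is set in 'comp_mask' below; the
 * single_stride/single_wqe log2 fields then describe the striding RQ
 * layout, within the bounds advertised in struct mlx5_ib_striding_rq_caps.
 */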
struct mlx5_ib_create_wq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32   rq_wqe_count;
	__u32   rq_wqe_shift;
	__u32   user_index;
	__u32   flags;
	__u32   comp_mask;
	__u32	single_stride_log_num_of_bytes;
	__u32	single_wqe_log_num_of_strides;
	__u32	two_byte_shift_en;
};
struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];
};
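/*
 * Burst parameters for packet pacing, meaningful when
 * MLX5_IB_PP_SUPPORT_BURST is advertised: 'max_burst_sz' bounds a burst
 * and 'typical_pkt_sz' hints at the typical packet size.
 */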
struct mlx5_ib_burst_info {
	__u32       max_burst_sz;
	__u16       typical_pkt_sz;
	__u16       reserved;
};
struct mlx5_ib_modify_qp {
	__u32			   comp_mask;
	struct mlx5_ib_burst_info  burst_info;
	__u32			   reserved;
};
struct mlx5_ib_modify_qp_resp {
	__u32	response_length;
	__u32	dctn;
};
struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};
struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};
struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};
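/*
 * Snapshot of the device clock parameters, exposed read-only to userspace
 * via the MLX5_IB_MMAP_CLOCK_INFO page. 'sign' acts as a sequence counter
 * for lockless reads; a device timestamp 'ts' converts to nanoseconds
 * roughly as: nsec + ((((ts - cycles) & mask) * mult + frac) >> shift).
 */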
struct mlx5_ib_clock_info {
	__u32 sign;
	__u32 resv;
	__aligned_u64 nsec;
	__aligned_u64 cycles;
	__aligned_u64 frac;
	__u32 mult;
	__u32 shift;
	__aligned_u64 mask;
	__aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE               = 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
	MLX5_IB_MMAP_WC_PAGE                    = 2,
	MLX5_IB_MMAP_NC_PAGE                    = 3,
	/* 4 is skipped; 5 keeps compatibility with old libmlx5 versions */
	MLX5_IB_MMAP_CORE_CLOCK                 = 5,
	MLX5_IB_MMAP_ALLOC_WC                   = 6,
	MLX5_IB_MMAP_CLOCK_INFO                 = 7,
	MLX5_IB_MMAP_DEVICE_MEM                 = 8,
};
enum {
	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
enum {
	MLX5_IB_CLOCK_INFO_V1              = 0,
};
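/*
 * Illustrative sketch only (not part of the kernel ABI; this helper name
 * is invented here): one way userspace can take a consistent copy of the
 * clock info page, modeled on rdma-core's mlx5dv_get_clock_info().
 * 'sign' is treated as a sequence counter: retry while the kernel is
 * mid-update or while 'sign' changed during the copy. Real code would
 * also need acquire/release memory barriers around the reads of 'sign'.
 */
static inline int mlx5_example_read_clock_info(
		const volatile struct mlx5_ib_clock_info *ci,
		struct mlx5_ib_clock_info *out)
{
	int retries = 10;

	while (retries--) {
		__u32 sign = ci->sign;

		if (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING)
			continue;	/* kernel is updating the page */

		out->nsec   = ci->nsec;
		out->cycles = ci->cycles;
		out->frac   = ci->frac;
		out->mult   = ci->mult;
		out->shift  = ci->shift;
		out->mask   = ci->mask;

		if (sign == ci->sign)
			return 0;	/* snapshot was consistent */
	}
	return -1;			/* contended; caller may retry */
}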
struct mlx5_ib_flow_counters_desc {
	__u32	description;
	__u32	index;
};
struct mlx5_ib_flow_counters_data {
	RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
	__u32   ncounters;
	__u32   reserved;
};
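/*
 * Each of the 'ncounters_data' entries in data[] below must match a
 * counters object referenced by a counters specification supplied when
 * the flow is created.
 */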
struct mlx5_ib_create_flow {
	__u32   ncounters_data;
	__u32   reserved;

	struct mlx5_ib_flow_counters_data data[];
};
#endif