1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
/* Query the e-switch vport context and return the SW-steering RX/TX
 * ICM base addresses programmed for the given vport.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (ret)
		return ret;

	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);
	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	return 0;
}
33
/* Read the vhca_id (GVMI) of a function via QUERY_HCA_CAP.
 * When @other_vport is set, @vport_number selects the queried function.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	void *out;
	int err;

	/* The HCA cap output is too large for the stack. */
	out = kzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
	/* Request the *current* general device caps. */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (!err)
		*gvmi = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	kfree(out);
	return err;
}
65
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 struct mlx5dr_esw_caps *caps)
68 {
69 caps->drop_icm_address_rx =
70 MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 sw_steering_fdb_action_drop_icm_address_rx);
72 caps->drop_icm_address_tx =
73 MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 sw_steering_fdb_action_drop_icm_address_tx);
75 caps->uplink_icm_address_rx =
76 MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 sw_steering_uplink_icm_address_rx);
78 caps->uplink_icm_address_tx =
79 MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 sw_steering_uplink_icm_address_tx);
81 caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 if (!caps->sw_owner_v2)
83 caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84
85 return 0;
86 }
87
/* Query whether RoCE is enabled in the NIC vport context of @vport.
 * vport 0 is the local vport; any other value queries the other vport.
 */
static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
					  u16 vport, bool *roce_en)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	/* Non-zero vport implies querying on behalf of another vport. */
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.roce_en);
	return 0;
}
108
mlx5dr_cmd_query_device(struct mlx5_core_dev * mdev,struct mlx5dr_cmd_caps * caps)109 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
110 struct mlx5dr_cmd_caps *caps)
111 {
112 bool roce_en;
113 int err;
114
115 caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
116 caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
117 caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
118 caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
119 caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
120 caps->roce_caps.fl_rc_qp_when_roce_disabled =
121 MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
122
123 if (MLX5_CAP_GEN(mdev, roce)) {
124 err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
125 if (err)
126 return err;
127
128 caps->roce_caps.roce_en = roce_en;
129 caps->roce_caps.fl_rc_qp_when_roce_disabled |=
130 MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
131 caps->roce_caps.fl_rc_qp_when_roce_enabled =
132 MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
133 }
134
135 caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
136
137 if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
138 caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
139 caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
140 }
141
142 if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
143 caps->flex_parser_id_icmpv6_dw0 =
144 MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
145 caps->flex_parser_id_icmpv6_dw1 =
146 MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
147 }
148
149 if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
150 caps->flex_parser_id_geneve_tlv_option_0 =
151 MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
152
153 if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
154 caps->flex_parser_id_mpls_over_gre =
155 MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
156
157 if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
158 caps->flex_parser_id_mpls_over_udp =
159 MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
160
161 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
162 caps->flex_parser_id_gtpu_dw_0 =
163 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
164
165 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
166 caps->flex_parser_id_gtpu_teid =
167 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
168
169 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
170 caps->flex_parser_id_gtpu_dw_2 =
171 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
172
173 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
174 caps->flex_parser_id_gtpu_first_ext_dw_0 =
175 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
176
177 caps->nic_rx_drop_address =
178 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
179 caps->nic_tx_drop_address =
180 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
181 caps->nic_tx_allow_address =
182 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
183
184 caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
185 caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
186
187 if (!caps->rx_sw_owner_v2)
188 caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
189 if (!caps->tx_sw_owner_v2)
190 caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
191
192 caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
193
194 caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
195 caps->hdr_modify_icm_addr =
196 MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
197
198 caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
199
200 return 0;
201 }
202
/* Query a flow table and extract its status, level and the SW-owner
 * ICM roots into @output.
 */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	int ret;

	MLX5_SET(query_flow_table_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (ret)
		return ret;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out,
				 flow_table_context.level);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);
	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);

	return 0;
}
232
/* Query a flow sampler general object and return its SW-steering
 * RX/TX ICM addresses.
 */
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
	void *obj;
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	obj = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);

	*rx_icm_addr = MLX5_GET64(sampler_obj, obj, sw_steering_icm_address_rx);
	*tx_icm_addr = MLX5_GET64(sampler_obj, obj, sw_steering_icm_address_tx);

	return 0;
}
262
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)263 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
264 {
265 u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
266
267 MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
268
269 return mlx5_cmd_exec_in(mdev, sync_steering, in);
270 }
271
/* Install a flow table entry that applies a modify-header action and
 * forwards to a single vport destination.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u32 vport_id)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	unsigned int inlen;
	void *flow_ctx;
	void *dest;
	u32 *in;
	int ret;

	/* Room for the FTE plus exactly one destination entry. */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		1 * MLX5_ST_SZ_BYTES(dest_format_struct);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, flow_ctx, group_id, group_id);
	MLX5_SET(flow_context, flow_ctx, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, flow_ctx, destination_list_size, 1);
	MLX5_SET(flow_context, flow_ctx, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	dest = MLX5_ADDR_OF(flow_context, flow_ctx, destination);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport_id);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return ret;
}
315
/* Delete the flow table entry at index 0 of the given table. */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode,
		 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, table_type);
	MLX5_SET(delete_fte_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
328
/* Allocate a FW modify-header context holding @num_of_actions 64-bit
 * action entries; the resulting ID is returned in @modify_header_id.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *actions_ptr;
	u32 inlen;
	u32 *in;
	int ret;

	/* Trailing variable-length array of actions follows the header. */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		num_of_actions * sizeof(u64);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions,
		 num_of_actions);

	actions_ptr = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_ptr, actions, num_of_actions * sizeof(u64));

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*modify_header_id = MLX5_GET(alloc_modify_header_context_out,
					     out, modify_header_id);

	kvfree(in);
	return ret;
}
364
/* Release a modify-header context previously allocated from FW. */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
377
/* Create a flow group with no match criteria (matches everything)
 * in the given table and return its ID.
 */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	u32 *in;
	int ret;

	/* The create_flow_group input is large; allocate it off-stack. */
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (!ret)
		*group_id = MLX5_GET(create_flow_group_out, out, group_id);

	kvfree(in);
	return ret;
}
406
/* Destroy a flow group by table type, table ID and group ID. */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
422
/* Create a flow table per @attr.  For SW-owned tables the ICM roots
 * are programmed directly; for FW-owned FDB tables the RX ICM address
 * chosen by FW is returned through @fdb_rx_icm_addr.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	void *ft_ctx;
	int ret;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_ctx, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_ctx, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 serves FDB RX / NIC TX / NIC RX;
		 * icm_addr_1 serves FDB TX only.
		 */
		switch (attr->table_type) {
		case MLX5_FLOW_TABLE_TYPE_NIC_RX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			break;
		case MLX5_FLOW_TABLE_TYPE_NIC_TX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
			break;
		case MLX5_FLOW_TABLE_TYPE_FDB:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
			break;
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (ret)
		return ret;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);

	/* FW-owned FDB tables report the ICM address FW picked. */
	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
478
/* Destroy a flow table by table ID and table type. */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
492
/* Allocate a packet-reformat context of type @rt carrying
 * @reformat_size bytes of reformat data; the new context ID is
 * returned in @reformat_id.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, data_sz, base_sz;
	void *prctx;
	void *pdata;
	void *in;
	int ret;

	base_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
				    packet_reformat_context.reformat_data);
	/* Replace the fixed reformat_data field with the caller's payload,
	 * keeping the mailbox length 4-byte aligned.
	 */
	inlen = ALIGN(base_sz + reformat_size - data_sz, 4);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
			     packet_reformat_context);
	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);

	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0,
		 reformat_param_0);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1,
		 reformat_param_1);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size,
		 reformat_size);
	if (reformat_data && reformat_size)
		memcpy(pdata, reformat_data, reformat_size);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
					out, packet_reformat_id);

	kvfree(in);
	return ret;
}
539
/* Free a packet-reformat context; failures are intentionally ignored. */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
552
/* Query a RoCE address table entry and fill @attr with the GID,
 * source MAC and RoCE version of the entry.
 */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	u32 roce_ver;
	int ret;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);

	ret = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (ret)
		return ret;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out,
			    out, roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything other than v2 is reported as v1. */
	roce_ver = MLX5_GET(query_roce_address_out, out,
			    roce_address.roce_version);
	attr->roce_ver = (roce_ver == MLX5_ROCE_VERSION_2) ?
			 MLX5_ROCE_VERSION_2 : MLX5_ROCE_VERSION_1;

	return 0;
}
587
/* Decide whether the FTE needs the extended destination format:
 * required when forwarding to more than one destination while at least
 * one vport destination carries a reformat ID.  Validates the request
 * against the FW encap limits.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int fwd_count = 0;
	int encap_count = 0;
	int idx;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	for (idx = 0; idx < fte->dests_size; idx++) {
		/* Counters don't occupy a forwarding destination slot. */
		if (fte->dest_arr[idx].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (fte->dest_arr[idx].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    fte->dest_arr[idx].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			encap_count++;
		fwd_count++;
	}

	if (fwd_count > 1 && encap_count > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (encap_count > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
624
/* Build and execute a SET_FLOW_TABLE_ENTRY command from @fte.
 *
 * Handles the extended-destination format (larger per-destination
 * entries when multiple forward destinations mix with per-vport
 * reformat IDs), push-VLAN actions, the match value, the forward
 * destination list and the flow-counter list.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP if the FW
 * cannot honor the destination layout, -ENOMEM, -EINVAL if the
 * counter list exceeds the FW limit, or the command-exec error).
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Extended destinations use a larger per-entry format. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	/* Non-zero vport means the table belongs to another vport. */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations the reformat is carried
		 * per-destination, so strip it from the global action.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* First push-VLAN header. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	/* Second push-VLAN header (QinQ). */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* in_dests is a cursor that advances by dst_cnt_size per entry;
	 * forward destinations are written first, counters after them.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			unsigned int id, type = fte->dest_arr[i].type;

			/* Counters go in the flow-counter list below. */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* FW only knows FLOW_TABLE; remap the type. */
				id = fte->dest_arr[i].ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = fte->dest_arr[i].vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(fte->dest_arr[i].vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat only exists in
				 * the extended destination format.
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						      MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				break;
			default:
				id = fte->dest_arr[i].tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		/* FW bounds the number of counters per FTE. */
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
796