1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
/* Read the SW steering RX/TX ICM base addresses from the e-switch vport
 * context of @vport_number (set @other_vport to query a vport other than
 * our own). Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (ret)
		return ret;

	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);
	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	return 0;
}
33
/* Query the GVMI (vhca_id) of @vport_number via QUERY_HCA_CAP.
 * When @other_vport is set the query targets another function;
 * otherwise @vport_number is ignored by FW.
 * Returns 0 and fills *@gvmi on success, negative errno otherwise.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int err;

	/* The HCA cap output is too large for the stack - allocate it */
	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
	/* op_mod: general device caps, current (not max) values */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (err)
		goto out_free;

	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);

out_free:
	/* Single cleanup path (goto-based, per file convention) instead of
	 * duplicating kfree() in the error branch.
	 */
	kfree(out);
	return err;
}
65
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 struct mlx5dr_esw_caps *caps)
68 {
69 caps->drop_icm_address_rx =
70 MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 sw_steering_fdb_action_drop_icm_address_rx);
72 caps->drop_icm_address_tx =
73 MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 sw_steering_fdb_action_drop_icm_address_tx);
75 caps->uplink_icm_address_rx =
76 MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 sw_steering_uplink_icm_address_rx);
78 caps->uplink_icm_address_tx =
79 MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 sw_steering_uplink_icm_address_tx);
81 caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 if (!caps->sw_owner_v2)
83 caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84
85 return 0;
86 }
87
/* Read the roce_en bit from the NIC vport context of @vport.
 * Returns 0 and fills *@roce_en on success, negative errno otherwise.
 */
static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
					  u16 vport, bool *roce_en)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	/* vport 0 is our own; any other vport needs the other_vport bit */
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.roce_en);
	return 0;
}
108
/* Populate @caps with all device capabilities SW steering depends on:
 * GVMI, RoCE state, steering format version, flex parser ids, the
 * NIC RX/TX drop/allow ICM addresses, sw_owner(_v2) support and the
 * steering ICM geometry.
 * Returns 0 on success or a negative errno from the NIC vport query.
 */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps)
{
	bool roce_en;
	int err;

	caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
	caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
	caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
	caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
	caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
	caps->roce_caps.fl_rc_qp_when_roce_disabled =
		MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);

	if (MLX5_CAP_GEN(mdev, roce)) {
		/* Query our own vport (0) for the current RoCE enablement */
		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
		if (err)
			return err;

		caps->roce_caps.roce_en = roce_en;
		/* OR with the general cap read above - either source suffices */
		caps->roce_caps.fl_rc_qp_when_roce_disabled |=
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
		caps->roce_caps.fl_rc_qp_when_roce_enabled =
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
	}

	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);

	/* geneve_tlv_option_0_exist is the indication of
	 * STE support for lookup type flex_parser_ok
	 */
	caps->flex_parser_ok_bits_supp =
		MLX5_CAP_FLOWTABLE(mdev,
				   flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);

	/* Each enabled flex parser protocol exposes its parser id(s) */
	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
		caps->flex_parser_id_icmpv6_dw0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
		caps->flex_parser_id_icmpv6_dw1 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
		caps->flex_parser_id_geneve_tlv_option_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
		caps->flex_parser_id_mpls_over_gre =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
		caps->flex_parser_id_mpls_over_udp =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
		caps->flex_parser_id_gtpu_teid =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
		caps->flex_parser_id_gtpu_dw_2 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_first_ext_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);

	/* ICM addresses of the NIC default drop/allow actions */
	caps->nic_rx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
	caps->nic_tx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
	caps->nic_tx_allow_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);

	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);

	/* sw_owner_v2 supersedes sw_owner; only read the legacy bit as fallback */
	if (!caps->rx_sw_owner_v2)
		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
	if (!caps->tx_sw_owner_v2)
		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);

	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);

	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);

	return 0;
}
211
/* Query a flow table by @type/@table_id and report its status, level
 * and SW-owner ICM roots into @output.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	int ret;

	MLX5_SET(query_flow_table_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);
	MLX5_SET(query_flow_table_in, in, table_type, type);

	ret = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (ret)
		return ret;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);
	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);

	return 0;
}
241
/* Query a flow sampler general object and extract its SW steering
 * RX/TX ICM addresses.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
	void *obj;
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	obj = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
	*rx_icm_addr = MLX5_GET64(sampler_obj, obj,
				  sw_steering_icm_address_rx);
	*tx_icm_addr = MLX5_GET64(sampler_obj, obj,
				  sw_steering_icm_address_tx);

	return 0;
}
271
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)272 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
273 {
274 u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
275
276 MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
277
278 return mlx5_cmd_exec_in(mdev, sync_steering, in);
279 }
280
/* Install a flow table entry that applies @modify_header_id and then
 * forwards to @vport (single destination).
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	unsigned int inlen;
	void *flow_ctx;
	void *dest;
	u32 *in;
	int ret;

	/* Room for the FTE plus exactly one forwarding destination */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		MLX5_ST_SZ_BYTES(dest_format_struct);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_id, table_id);
	MLX5_SET(set_fte_in, in, table_type, table_type);

	flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, flow_ctx, group_id, group_id);
	MLX5_SET(flow_context, flow_ctx, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, flow_ctx, destination_list_size, 1);
	MLX5_SET(flow_context, flow_ctx, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	dest = MLX5_ADDR_OF(flow_context, flow_ctx, destination);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return ret;
}
324
/* Delete the flow table entry at flow_index 0 of @table_id.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_id, table_id);
	MLX5_SET(delete_fte_in, in, table_type, table_type);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
337
/* Allocate a FW modify-header context holding @num_of_actions 64-bit
 * actions copied from @actions.
 * Returns 0 and fills *@modify_header_id on success, negative errno
 * otherwise.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *actions_ptr;
	u32 inlen;
	u32 *in;
	int err;

	/* The action list is a variable-length tail of the input mailbox */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		num_of_actions * sizeof(u64);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);

	actions_ptr = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_ptr, actions, num_of_actions * sizeof(u64));

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto free_in;

	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
				     modify_header_id);
free_in:
	kvfree(in);
	return err;
}
373
/* Release the FW modify-header context identified by @modify_header_id.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
386
/* Create a flow group with an empty match criteria in @table_id.
 * Returns 0 and fills *@group_id on success, negative errno otherwise.
 */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	u32 *in;
	int ret;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (ret)
		goto free_in;

	*group_id = MLX5_GET(create_flow_group_out, out, group_id);

free_in:
	kvfree(in);
	return ret;
}
415
/* Destroy flow group @group_id of flow table @table_id.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
431
/* Create a flow table described by @attr. For SW-owned tables the ICM
 * root addresses are programmed per table type; for FW-owned FDB tables
 * the FW-chosen RX ICM address is reported back via @fdb_rx_icm_addr.
 * Returns 0 and fills *@table_id on success, negative errno otherwise.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	void *ft_ctx;
	int ret;

	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
	MLX5_SET(create_flow_table_in, in, uid, attr->uid);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_ctx, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_ctx, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
		 * icm_addr_1 used for FDB TX
		 */
		switch (attr->table_type) {
		case MLX5_FLOW_TABLE_TYPE_NIC_RX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			break;
		case MLX5_FLOW_TABLE_TYPE_NIC_TX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
			break;
		case MLX5_FLOW_TABLE_TYPE_FDB:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
			break;
		default:
			break;
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (ret)
		return ret;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);

	/* FW-owned FDB tables report their RX ICM address in three chunks */
	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
488
/* Destroy flow table @table_id of the given @table_type.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
502
/* Allocate a packet reformat context of type @rt carrying
 * @reformat_size bytes of @reformat_data.
 * Returns 0 and fills *@reformat_id on success, negative errno
 * otherwise.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, data_sz, base_sz;
	void *reformat_ctx;
	void *data_ptr;
	void *in;
	int ret;

	/* Size the mailbox for the actual data length, dword aligned */
	base_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
				    packet_reformat_context.reformat_data);
	inlen = ALIGN(base_sz + reformat_size - data_sz, 4);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	reformat_ctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
				    packet_reformat_context);
	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_param_0, reformat_param_0);
	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_param_1, reformat_param_1);
	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_data_size, reformat_size);

	data_ptr = MLX5_ADDR_OF(packet_reformat_context_in, reformat_ctx, reformat_data);
	if (reformat_data && reformat_size)
		memcpy(data_ptr, reformat_data, reformat_size);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (ret)
		goto free_in;

	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);

free_in:
	kvfree(in);
	return ret;
}
549
/* Release packet reformat context @reformat_id; result is ignored as
 * this runs on teardown paths with nothing useful to do on failure.
 */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);
	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
562
/* Query the RoCE address table entry @index on @vhca_port_num and fill
 * @attr with the GID, source MAC and RoCE version.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	int ret;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
	MLX5_SET(query_roce_address_in, in, roce_address_index, index);

	ret = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (ret)
		return ret;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out,
			    out, roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything other than v2 is reported as v1 */
	attr->roce_ver = MLX5_GET(query_roce_address_out, out,
				  roce_address.roce_version) == MLX5_ROCE_VERSION_2 ?
			 MLX5_ROCE_VERSION_2 : MLX5_ROCE_VERSION_1;

	return 0;
}
597
/* Decide whether @fte needs the extended destination format (more than
 * one forwarding destination combined with at least one per-destination
 * reformat), and validate the FW supports the required encap count.
 * Returns 0 on success, -EOPNOTSUPP when FW capabilities are exceeded.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int encap_log_max = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int encap_cnt = 0;
	int fwd_cnt = 0;
	int i;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	for (i = 0; i < fte->dests_size; i++) {
		enum mlx5_flow_destination_type type = fte->dest_arr[i].type;

		/* Counters and NONE entries are not forwarding destinations */
		if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;

		if ((type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    (fte->dest_arr[i].vport.flags &
		     MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
			encap_cnt++;

		fwd_cnt++;
	}

	*extended_dest = fwd_cnt > 1 && encap_cnt > 0;

	if (*extended_dest && !encap_log_max) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (encap_cnt > (1 << encap_log_max)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << encap_log_max);
		return -EOPNOTSUPP;
	}

	return 0;
}
636
/* Build and execute a SET_FLOW_TABLE_ENTRY command from @fte into
 * table @ft / group @group_id.
 * Packs the flow context (tag, source, action, vlans, match value),
 * then appends the forwarding destination list and the flow counter
 * list as variable-length trailers; the destination entry stride is
 * larger when the extended destination format is required.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	/* Also validates encap limits against FW caps */
	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Extended destinations use a wider per-entry format */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	/* Non-zero vport means the table belongs to another vport */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		/* With extended dests, reformat moves to the per-destination
		 * entries, so strip it from the flow-level action bits
		 */
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two VLAN push actions */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First trailer: the forwarding destination list */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			/* Counters go in the separate counter list below */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = fte->dest_arr[i].ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;

				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
					id = fte->dest_arr[i].vport.num;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_VHCA_ID));
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				} else {
					/* Uplink: destination id is unused */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
				}
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat only valid in
				 * the extended destination format
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						      MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				id = fte->dest_arr[i].tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			/* Advance by the entry stride chosen above */
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second trailer: the flow counter list, appended after the
	 * destinations (in_dests already points past them)
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
							log_max_flow_counter,
							ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
825