1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
/* Read the SW steering ICM base addresses (RX and TX) from the E-Switch
 * vport context of the requested vport.
 *
 * @other_vport:  when true, query @vport_number rather than our own vport
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (ret)
		return ret;

	*icm_address_rx = MLX5_GET64(query_esw_vport_context_out, out,
				     esw_vport_context.sw_steering_vport_icm_address_rx);
	*icm_address_tx = MLX5_GET64(query_esw_vport_context_out, out,
				     esw_vport_context.sw_steering_vport_icm_address_tx);
	return 0;
}
33
/* Query the vHCA id (GVMI) of a function.
 *
 * @mdev:	  mlx5 core device used to issue the command
 * @other_vport:  when true, query @vport_number instead of this function
 * @vport_number: function id to query (meaningful when @other_vport is set)
 * @gvmi:	  out - the queried cmd_hca_cap.vhca_id
 *
 * The HCA-cap output mailbox is large, so it is heap allocated.
 * Returns 0 on success or a negative errno from the FW command.
 *
 * Fix vs. original: the error path duplicated the kfree(out); use the
 * idiomatic single-exit goto cleanup instead.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int err;

	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
	/* op_mod selects general device caps, current (not max) values */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (err)
		goto out_free;

	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);

out_free:
	kfree(out);
	return err;
}
65
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 struct mlx5dr_esw_caps *caps)
68 {
69 caps->drop_icm_address_rx =
70 MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 sw_steering_fdb_action_drop_icm_address_rx);
72 caps->drop_icm_address_tx =
73 MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 sw_steering_fdb_action_drop_icm_address_tx);
75 caps->uplink_icm_address_rx =
76 MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 sw_steering_uplink_icm_address_rx);
78 caps->uplink_icm_address_tx =
79 MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 sw_steering_uplink_icm_address_tx);
81 caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 if (!caps->sw_owner_v2)
83 caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84
85 return 0;
86 }
87
/* Populate @caps with the device and NIC-steering capabilities that SW
 * steering consumes, read from the FW capability mailboxes cached in @mdev.
 * Always returns 0.
 */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps)
{
	/* General device caps */
	caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
	caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
	caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
	caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
	caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);

	/* Flex parser ids are only meaningful when the corresponding
	 * ICMP v4/v6 parsing support was reported in flex_protocols.
	 */
	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
	}

	if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
		caps->flex_parser_id_icmpv6_dw0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
		caps->flex_parser_id_icmpv6_dw1 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
	}

	/* NIC RX/TX drop and TX allow action ICM addresses */
	caps->nic_rx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
	caps->nic_tx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
	caps->nic_tx_allow_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);

	/* Per-direction ownership: sw_owner is a fallback for v2 */
	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);

	if (!caps->rx_sw_owner_v2)
		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
	if (!caps->tx_sw_owner_v2)
		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);

	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);

	/* SW steering ICM pool geometry and modify-header area */
	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	return 0;
}
134
/* Query a flow table and return the fields SW steering needs: command
 * status, table level and the two sw_owner ICM roots.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	int ret;

	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (ret)
		return ret;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out,
				 flow_table_context.level);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);
	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);

	return 0;
}
164
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)165 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
166 {
167 u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
168
169 MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
170
171 return mlx5_cmd_exec_in(mdev, sync_steering, in);
172 }
173
/* Install a single flow table entry that applies a modify-header action
 * and forwards to one vport destination.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u32 vport_id)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	unsigned int inlen;
	void *flow_ctx;
	void *dest;
	u32 *in;
	int ret;

	/* Base command plus room for exactly one destination entry */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		MLX5_ST_SZ_BYTES(dest_format_struct);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, flow_ctx, group_id, group_id);
	MLX5_SET(flow_context, flow_ctx, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, flow_ctx, destination_list_size, 1);
	MLX5_SET(flow_context, flow_ctx, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	dest = MLX5_ADDR_OF(flow_context, flow_ctx, destination);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport_id);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return ret;
}
217
/* Delete the flow table entry at flow index 0 (the mailbox is
 * zero-initialized, so no explicit flow_index is set) of the given table.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_id, table_id);
	MLX5_SET(delete_fte_in, in, table_type, table_type);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
230
/* Allocate a modify-header context holding @num_of_actions 64-bit actions.
 * On success *@modify_header_id receives the FW-assigned context id.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *actions_ptr;
	u32 inlen;
	u32 *in;
	int ret;

	/* The action list is carried inline after the base command layout */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		num_of_actions * sizeof(u64);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions,
		 num_of_actions);

	actions_ptr = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_ptr, actions, num_of_actions * sizeof(u64));

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*modify_header_id = MLX5_GET(alloc_modify_header_context_out,
					     out, modify_header_id);

	kvfree(in);
	return ret;
}
266
/* Release a modify-header context previously allocated by
 * mlx5dr_cmd_alloc_modify_header().
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
279
/* Create a flow group with no match criteria (the zeroed mailbox leaves
 * match_criteria_enable at 0). On success *@group_id receives the new id.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int ret;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (!ret)
		*group_id = MLX5_GET(create_flow_group_out, out, group_id);

	kfree(in);
	return ret;
}
308
/* Destroy a flow group created by mlx5dr_cmd_create_empty_flow_group().
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
324
/* Create a flow table per @attr. For SW-owned tables the caller-supplied
 * ICM root addresses are programmed; for FW-owned FDB tables the FW-chosen
 * RX ICM anchor address is returned through @fdb_rx_icm_addr (if non-NULL).
 * On success *@table_id receives the new table id.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	void *ft_ctx;
	int ret;

	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_ctx, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_ctx, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
		 * icm_addr_1 used for FDB TX
		 */
		switch (attr->table_type) {
		case MLX5_FLOW_TABLE_TYPE_NIC_RX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			break;
		case MLX5_FLOW_TABLE_TYPE_NIC_TX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
			break;
		case MLX5_FLOW_TABLE_TYPE_FDB:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
			break;
		default:
			break;
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (ret)
		return ret;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);

	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
380
/* Destroy a flow table created by mlx5dr_cmd_create_flow_table().
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
394
/* Allocate a packet-reformat context of type @rt carrying @reformat_size
 * bytes of @reformat_data. On success *@reformat_id receives the new id.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, base_sz, data_sz;
	void *reformat_ctx;
	void *data;
	void *in;
	int ret;

	/* The input layout ends with a placeholder reformat_data field;
	 * replace its size with the actual payload length, and pad the
	 * whole mailbox to a 4-byte boundary.
	 */
	base_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
				    packet_reformat_context.reformat_data);
	inlen = ALIGN(base_sz + reformat_size - data_sz, 4);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	reformat_ctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
				    packet_reformat_context);
	data = MLX5_ADDR_OF(packet_reformat_context_in, reformat_ctx,
			    reformat_data);

	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, reformat_ctx, reformat_data_size,
		 reformat_size);
	memcpy(data, reformat_data, reformat_size);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out,
					packet_reformat_id);

	kvfree(in);
	return ret;
}
436
/* Release a packet-reformat context previously allocated by
 * mlx5dr_cmd_create_reformat_ctx(). The command status is ignored.
 */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);
	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
449
/* Query a RoCE address table entry: GID, source MAC and RoCE version.
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	int ret;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
	MLX5_SET(query_roce_address_in, in, roce_address_index, index);

	ret = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (ret)
		return ret;

	/* L3 address (GID) and upper source MAC bytes from the entry */
	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything that is not RoCEv2 is reported as RoCEv1 */
	attr->roce_ver = MLX5_GET(query_roce_address_out, out,
				  roce_address.roce_version) ==
			 MLX5_ROCE_VERSION_2 ?
			 MLX5_ROCE_VERSION_2 : MLX5_ROCE_VERSION_1;

	return 0;
}
484
/* Decide whether @fte needs the extended destination entry format
 * (multiple forward destinations combined with per-destination packet
 * reformat), and validate the request against FW encap limits.
 *
 * *@extended_dest is set accordingly. Returns 0 on success, -EOPNOTSUPP
 * when FW cannot satisfy the request.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int log_max_encap = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int fwd_dests = 0;
	int encaps = 0;
	int i;

	*extended_dest = false;

	/* Only forwarding entries carry a destination list */
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	/* Count forward destinations (counters excluded) and how many of
	 * them are vports requesting a reformat id.
	 */
	for (i = 0; i < fte->dests_size; i++) {
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			encaps++;
		fwd_dests++;
	}

	if (fwd_dests > 1 && encaps > 0)
		*extended_dest = true;

	if (*extended_dest && !log_max_encap) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (encaps > (1 << log_max_encap)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << log_max_encap);
		return -EOPNOTSUPP;
	}

	return 0;
}
521
/* Build and execute a SET_FLOW_TABLE_ENTRY command from the SW steering
 * descriptors @ft (table) and @fte (entry).
 *
 * @opmod/@modify_mask are passed through to the command (op_mod /
 * modify_enable_mask). Destination entries are laid out after the flow
 * context; their per-entry size depends on whether the extended
 * destination format is needed (see mlx5dr_cmd_set_extended_dest()).
 *
 * Returns 0 on success, -EOPNOTSUPP when the destination layout cannot
 * be satisfied, -EINVAL when too many counters are listed, -ENOMEM on
 * allocation failure, or the FW command error.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	/* Decide (and validate) the destination entry format up front */
	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	/* Base command plus one destination slot per dest (incl. counters) */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	/* Non-zero vport means the table belongs to another vport */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		/* With extended destinations, packet reformat moves from the
		 * flow context to the per-destination entries, so strip the
		 * context-level reformat action bit.
		 */
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two VLAN push headers are copied unconditionally; they only
	 * take effect if the corresponding push-vlan action bits are set.
	 */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First pass: forward destinations. in_dests advances by
	 * dst_cnt_size per written entry and is reused (not reset) by the
	 * counter pass below, so counters land after the forward dests.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			unsigned int id, type = fte->dest_arr[i].type;

			/* Counters are handled in the second pass */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* FW has no "table num" type - send as TABLE */
				id = fte->dest_arr[i].ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = fte->dest_arr[i].vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(fte->dest_arr[i].vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat only exists in the
				 * extended destination format.
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						      MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			default:
				id = fte->dest_arr[i].tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second pass: flow counters, appended after the forward dests */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		/* Checked after filling; on overflow the command is dropped
		 * before execution, so nothing reaches FW.
		 */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
689