1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39 #include "mlx5_core.h"
40 #include "eswitch.h"
41
/* No-op update_root_ft for namespaces not backed by FW flow tables;
 * part of the stub command set (mlx5_flow_cmd_stubs) which always succeeds
 * without issuing any device command.
 */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
49
/* No-op flow table creation stub; always reports success. */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
57
/* No-op flow table destruction stub; always reports success. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
63
/* No-op flow table modification stub; always reports success. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
70
/* No-op flow group creation stub; always reports success. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
78
/* No-op flow group destruction stub; always reports success. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
85
/* No-op flow table entry creation stub; always reports success. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
93
/* FTE update stub. Unlike the other stubs this deliberately fails:
 * in-place modification of an entry cannot be faked, so callers must
 * fall back to delete + re-create.
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
102
/* No-op flow table entry deletion stub; always reports success. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
109
/* No-op packet reformat (encap/decap) context allocation stub;
 * pkt_reformat is left untouched and success is reported.
 */
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       int reformat_type,
					       size_t size,
					       void *reformat_data,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}
119
/* No-op packet reformat context release stub. */
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}
124
/* No-op modify-header context allocation stub; modify_hdr is left
 * untouched and success is reported.
 */
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}
132
/* No-op modify-header context release stub. */
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}
137
/* No-op peer namespace pairing stub (used e.g. where merged-eswitch
 * peering is not implemented); always reports success.
 */
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}
143
/* No-op root namespace creation stub; always reports success. */
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
148
/* No-op root namespace destruction stub; always reports success. */
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
153
/* Issue SET_FLOW_TABLE_ROOT to make @ft the root table of its type, or
 * to disconnect the current root when @disconnect is set.
 *
 * @underlay_qpn: QP used as the IPoIB underlay; on IB ports an update
 *                with underlay_qpn == 0 is silently skipped.
 *
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	/* IB ports carry the root table implicitly until an underlay QP
	 * exists; nothing to tell FW in that case.
	 */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	/* op_mod 1 = disconnect root; otherwise point the root at ft. */
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	/* Tables owned by another vport need the other_vport addressing. */
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
182
/* Create a flow table in FW (CREATE_FLOW_TABLE) and store the returned
 * table id in ft->id.
 *
 * @log_size: log2 of the number of entries the table must hold.
 * @next_ft:  table to forward to on miss (NORMAL mode) or the LAG
 *	      master's next table (LAG_DEMUX mode); may be NULL.
 *
 * Encap/decap enablement and termination-table marking are derived from
 * ft->flags. Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	/* Tables owned by another vport need the other_vport addressing. */
	if (ft->vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* On miss either forward to next_ft or apply the table's
		 * configured default miss action.
		 */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		/* op_mod 0x1 selects LAG demux table creation. */
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}
244
/* Destroy a FW flow table (DESTROY_FLOW_TABLE).
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	/* Tables owned by another vport need the other_vport addressing. */
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
}
262
/* Re-point a table's miss behaviour (MODIFY_FLOW_TABLE).
 *
 * For LAG demux tables only the lag_master_next_table_id field is
 * modified; for all other tables the miss action/id is rewritten to
 * forward to @next_ft, or to the table's default miss action when
 * @next_ft is NULL.
 *
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			/* 0 disconnects the LAG next-table chain. */
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		/* Tables owned by another vport need other_vport addressing. */
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
309
/* Create a flow group inside @ft (CREATE_FLOW_GROUP) and store the
 * returned group id in fg->id.
 *
 * @in: caller-prepared create_flow_group_in mailbox holding the match
 *      criteria; the opcode, table addressing and vport fields are
 *      filled in here.
 *
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}
334
/* Destroy a flow group (DESTROY_FLOW_GROUP).
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
354
/* Decide whether an FTE needs the extended destination format.
 *
 * Extended destinations are required when the entry forwards to more
 * than one destination and at least one vport destination carries a
 * per-destination packet-reformat id. Counter destinations are not
 * forwarding destinations and are ignored here.
 *
 * On success *extended_dest is set accordingly and 0 is returned;
 * -EOPNOTSUPP is returned when FW cannot satisfy the requirement
 * (no extended-destination support, or too many encap destinations).
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	/* Entries that don't forward never need extended destinations. */
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Build and execute SET_FLOW_TABLE_ENTRY for @fte.
 *
 * @opmod:       0 = create a new entry, 1 = modify an existing one.
 * @modify_mask: which FTE fields to modify (only meaningful with opmod 1).
 * @group_id:    flow group the entry belongs to.
 *
 * The mailbox is sized dynamically: a fixed set_fte_in header plus one
 * destination record per child of the FTE, using the extended
 * destination format when mlx5_set_extended_dest() requires it.
 * Forwarding destinations and flow counters share the same destination
 * array; counters are appended after the forwarding entries.
 *
 * Returns 0 on success, -EOPNOTSUPP/-ENOMEM/-EINVAL on local failure,
 * or a negative errno from the FW command.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Destination record size depends on the chosen format. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	/* Tables owned by another vport need the other_vport addressing. */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations, packet reformat is expressed
		 * per destination, so strip it from the entry-level action.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);

	/* Up to two VLAN push headers. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* First pass: forwarding destinations (counters skipped). */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* Caller gave a raw table number; FW only
				 * knows the FLOW_TABLE destination type.
				 */
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				/* Per-destination reformat only exists in the
				 * extended destination format.
				 */
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		/* Second pass: flow counter destinations only. */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
563
mlx5_cmd_create_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte)564 static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
565 struct mlx5_flow_table *ft,
566 struct mlx5_flow_group *group,
567 struct fs_fte *fte)
568 {
569 struct mlx5_core_dev *dev = ns->dev;
570 unsigned int group_id = group->id;
571
572 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
573 }
574
mlx5_cmd_update_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg,int modify_mask,struct fs_fte * fte)575 static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
576 struct mlx5_flow_table *ft,
577 struct mlx5_flow_group *fg,
578 int modify_mask,
579 struct fs_fte *fte)
580 {
581 int opmod;
582 struct mlx5_core_dev *dev = ns->dev;
583 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
584 flow_table_properties_nic_receive.
585 flow_modify_en);
586 if (!atomic_mod_cap)
587 return -EOPNOTSUPP;
588 opmod = 1;
589
590 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
591 }
592
/* Delete a flow table entry (DELETE_FLOW_TABLE_ENTRY) by flow index.
 * Returns 0 on success or a negative errno from the FW command.
 */
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	/* Tables owned by another vport need the other_vport addressing. */
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}
611
/* Allocate a bulk of flow counters (ALLOC_FLOW_COUNTER).
 *
 * @alloc_bitmask: bulk-size bitmask; 0 allocates a single counter.
 * @id:            on success receives the first counter id of the bulk.
 *
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
629
/* Allocate a single flow counter (bulk bitmask 0). */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
634
/* Free a flow counter (DEALLOC_FLOW_COUNTER).
 * Returns 0 on success or a negative errno from the FW command.
 */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
644
/* Query a single flow counter (QUERY_FLOW_COUNTER).
 *
 * On success fills *packets and *bytes from the returned
 * traffic_counter statistics. Returns 0 on success or a negative errno.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	/* Output buffer: command header plus one traffic_counter record. */
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
667
/* Output buffer size, in bytes, needed to bulk-query @bulk_len counters:
 * the fixed command header plus one traffic_counter record per counter.
 */
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
673
/* Query @bulk_len consecutive flow counters starting at @base_id.
 *
 * @out must be at least mlx5_cmd_fc_get_bulk_query_out_len(bulk_len)
 * bytes. Returns 0 on success or a negative errno from the FW command.
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
686
/* Allocate a packet reformat (encap) context in FW
 * (ALLOC_PACKET_REFORMAT_CONTEXT) and store the returned context id in
 * pkt_reformat->id.
 *
 * @reformat_type: reformat kind as defined by the device interface.
 * @size/@reformat_data: raw header bytes to program; @size is validated
 *	against the FDB or NIC max_encap_header_size capability depending
 *	on @namespace.
 *
 * Returns 0 on success, -EINVAL for oversized headers, -ENOMEM on
 * allocation failure, or a negative errno from the FW command.
 */
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  int reformat_type,
					  size_t size,
					  void *reformat_data,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	/* Mailbox holds the fixed command layout plus the variable-length
	 * reformat header appended at reformat_data.
	 */
	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, reformat_type);
	memcpy(reformat, reformat_data, size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	/* On error `out` is still zeroed, so id reads as 0; callers must
	 * check err before using pkt_reformat.
	 */
	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}
741
/* Release a packet reformat context
 * (DEALLOC_PACKET_REFORMAT_CONTEXT). Errors from FW are ignored since
 * the caller cannot act on them during teardown.
 */
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
755
/* Allocate a modify-header context (ALLOC_MODIFY_HEADER_CONTEXT) and
 * store the returned context id in modify_hdr->id.
 *
 * @namespace:      flow namespace; selects the target table type and the
 *		    capability that bounds the number of actions.
 * @num_actions:    number of set/add/copy actions in @modify_actions.
 * @modify_actions: packed action array copied verbatim into the mailbox.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported namespaces or too
 * many actions, -ENOMEM on allocation failure, or a negative errno from
 * the FW command.
 */
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	/* Map the namespace to a table type and its action-count cap. */
	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
#ifdef CONFIG_MLX5_IPSEC
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
#endif
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	/* Mailbox holds the fixed layout plus the variable action array. */
	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	/* On error `out` is still zeroed, so id reads as 0; callers must
	 * check err before using modify_hdr.
	 */
	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}
824
/* Release a modify-header context (DEALLOC_MODIFY_HEADER_CONTEXT).
 * Errors from FW are ignored since the caller cannot act on them
 * during teardown.
 */
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
838
/* FW-backed command set: real device commands, with stubs only for the
 * operations this backend does not implement (peer/namespace handling).
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
857
/* All-stub command set for table types with no FW backing; every
 * operation succeeds without touching the device (except update_fte,
 * which returns -EOPNOTSUPP).
 */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
876
/* Return the FW-backed flow command set. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
881
/* Return the all-stub flow command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
886
/* Pick the command set for a flow table type: FW commands for types the
 * device implements, stubs for everything else.
 */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
904