1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
43 #include "accel/ipsec.h"
44 #include "fpga/ipsec.h"
45
46 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 sizeof(struct init_tree_node))
48
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 ...) {.type = FS_TYPE_PRIO,\
51 .min_ft_level = min_level_val,\
52 .num_levels = num_levels_val,\
53 .num_leaf_prios = num_prios_val,\
54 .caps = caps_val,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57 }
58
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
61 __VA_ARGS__)\
62
63 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
64 .def_miss_action = def_miss_act,\
65 .children = (struct init_tree_node[]) {__VA_ARGS__},\
66 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67 }
68
69 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 sizeof(long))
71
72 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73
74 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 .caps = (long[]) {__VA_ARGS__} }
76
77 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81
82 #define FS_CHAINING_CAPS_EGRESS \
83 FS_REQUIRED_CAPS( \
84 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
85 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
86 FS_CAP(flow_table_properties_nic_transmit \
87 .identified_miss_table_mode), \
88 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89
90 #define FS_CHAINING_CAPS_RDMA_TX \
91 FS_REQUIRED_CAPS( \
92 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
94 FS_CAP(flow_table_properties_nic_transmit_rdma \
95 .identified_miss_table_mode), \
96 FS_CAP(flow_table_properties_nic_transmit_rdma \
97 .flow_table_modify))
98
99 #define LEFTOVERS_NUM_LEVELS 1
100 #define LEFTOVERS_NUM_PRIOS 1
101
102 #define BY_PASS_PRIO_NUM_LEVELS 1
103 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
104 LEFTOVERS_NUM_PRIOS)
105
106 #define ETHTOOL_PRIO_NUM_LEVELS 1
107 #define ETHTOOL_NUM_PRIOS 11
108 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
109 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
110 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
111 #define KERNEL_NIC_NUM_PRIOS 1
112 /* One more level for tc */
113 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
114
115 #define KERNEL_NIC_TC_NUM_PRIOS 1
116 #define KERNEL_NIC_TC_NUM_LEVELS 3
117
118 #define ANCHOR_NUM_LEVELS 1
119 #define ANCHOR_NUM_PRIOS 1
120 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
121
122 #define OFFLOADS_MAX_FT 2
123 #define OFFLOADS_NUM_PRIOS 2
124 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
125
126 #define LAG_PRIO_NUM_LEVELS 1
127 #define LAG_NUM_PRIOS 1
128 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
129
130 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
131 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
132 #define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
133
134 struct node_caps {
135 size_t arr_sz;
136 long *caps;
137 };
138
139 static struct init_tree_node {
140 enum fs_node_type type;
141 struct init_tree_node *children;
142 int ar_size;
143 struct node_caps caps;
144 int min_ft_level;
145 int num_leaf_prios;
146 int prio;
147 int num_levels;
148 enum mlx5_flow_table_miss_action def_miss_action;
149 } root_fs = {
150 .type = FS_TYPE_NAMESPACE,
151 .ar_size = 7,
152 .children = (struct init_tree_node[]){
153 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
154 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
155 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
156 BY_PASS_PRIO_NUM_LEVELS))),
157 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
158 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
159 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
160 LAG_PRIO_NUM_LEVELS))),
161 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
162 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
163 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
164 OFFLOADS_MAX_FT))),
165 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
166 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
167 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
168 ETHTOOL_PRIO_NUM_LEVELS))),
169 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
170 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
171 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
172 KERNEL_NIC_TC_NUM_LEVELS),
173 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
174 KERNEL_NIC_PRIO_NUM_LEVELS))),
175 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
176 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
177 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
178 LEFTOVERS_NUM_LEVELS))),
179 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
180 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
181 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
182 ANCHOR_NUM_LEVELS))),
183 }
184 };
185
186 static struct init_tree_node egress_root_fs = {
187 .type = FS_TYPE_NAMESPACE,
188 #ifdef CONFIG_MLX5_IPSEC
189 .ar_size = 2,
190 #else
191 .ar_size = 1,
192 #endif
193 .children = (struct init_tree_node[]) {
194 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
195 FS_CHAINING_CAPS_EGRESS,
196 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
197 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
198 BY_PASS_PRIO_NUM_LEVELS))),
199 #ifdef CONFIG_MLX5_IPSEC
200 ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
201 FS_CHAINING_CAPS_EGRESS,
202 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
203 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
204 KERNEL_TX_IPSEC_NUM_LEVELS))),
205 #endif
206 }
207 };
208
209 #define RDMA_RX_BYPASS_PRIO 0
210 #define RDMA_RX_KERNEL_PRIO 1
211 static struct init_tree_node rdma_rx_root_fs = {
212 .type = FS_TYPE_NAMESPACE,
213 .ar_size = 2,
214 .children = (struct init_tree_node[]) {
215 [RDMA_RX_BYPASS_PRIO] =
216 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
217 FS_CHAINING_CAPS,
218 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
219 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
220 BY_PASS_PRIO_NUM_LEVELS))),
221 [RDMA_RX_KERNEL_PRIO] =
222 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
223 FS_CHAINING_CAPS,
224 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
225 ADD_MULTIPLE_PRIO(1, 1))),
226 }
227 };
228
229 static struct init_tree_node rdma_tx_root_fs = {
230 .type = FS_TYPE_NAMESPACE,
231 .ar_size = 1,
232 .children = (struct init_tree_node[]) {
233 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
234 FS_CHAINING_CAPS_RDMA_TX,
235 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
236 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
237 BY_PASS_PRIO_NUM_LEVELS))),
238 }
239 };
240
241 enum fs_i_lock_class {
242 FS_LOCK_GRANDPARENT,
243 FS_LOCK_PARENT,
244 FS_LOCK_CHILD
245 };
246
247 static const struct rhashtable_params rhash_fte = {
248 .key_len = sizeof_field(struct fs_fte, val),
249 .key_offset = offsetof(struct fs_fte, val),
250 .head_offset = offsetof(struct fs_fte, hash),
251 .automatic_shrinking = true,
252 .min_size = 1,
253 };
254
255 static const struct rhashtable_params rhash_fg = {
256 .key_len = sizeof_field(struct mlx5_flow_group, mask),
257 .key_offset = offsetof(struct mlx5_flow_group, mask),
258 .head_offset = offsetof(struct mlx5_flow_group, hash),
259 .automatic_shrinking = true,
260 .min_size = 1,
261
262 };
263
264 static void del_hw_flow_table(struct fs_node *node);
265 static void del_hw_flow_group(struct fs_node *node);
266 static void del_hw_fte(struct fs_node *node);
267 static void del_sw_flow_table(struct fs_node *node);
268 static void del_sw_flow_group(struct fs_node *node);
269 static void del_sw_fte(struct fs_node *node);
270 static void del_sw_prio(struct fs_node *node);
271 static void del_sw_ns(struct fs_node *node);
272 /* Deleting a rule (destination) is a special case that
273  * requires locking the FTE for the entire deletion process.
274  */
275 static void del_sw_hw_rule(struct fs_node *node);
276 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
277 struct mlx5_flow_destination *d2);
278 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
279 static struct mlx5_flow_rule *
280 find_flow_rule(struct fs_fte *fte,
281 struct mlx5_flow_destination *dest);
282
283 static void tree_init_node(struct fs_node *node,
284 void (*del_hw_func)(struct fs_node *),
285 void (*del_sw_func)(struct fs_node *))
286 {
287 refcount_set(&node->refcount, 1);
288 INIT_LIST_HEAD(&node->list);
289 INIT_LIST_HEAD(&node->children);
290 init_rwsem(&node->lock);
291 node->del_hw_func = del_hw_func;
292 node->del_sw_func = del_sw_func;
293 node->active = false;
294 }
295
296 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
297 {
298 if (parent)
299 refcount_inc(&parent->refcount);
300 node->parent = parent;
301
302 /* No parent means this node is the root */
303 if (!parent)
304 node->root = node;
305 else
306 node->root = parent->root;
307 }
308
309 static int tree_get_node(struct fs_node *node)
310 {
311 return refcount_inc_not_zero(&node->refcount);
312 }
313
314 static void nested_down_read_ref_node(struct fs_node *node,
315 enum fs_i_lock_class class)
316 {
317 if (node) {
318 down_read_nested(&node->lock, class);
319 refcount_inc(&node->refcount);
320 }
321 }
322
323 static void nested_down_write_ref_node(struct fs_node *node,
324 enum fs_i_lock_class class)
325 {
326 if (node) {
327 down_write_nested(&node->lock, class);
328 refcount_inc(&node->refcount);
329 }
330 }
331
332 static void down_write_ref_node(struct fs_node *node, bool locked)
333 {
334 if (node) {
335 if (!locked)
336 down_write(&node->lock);
337 refcount_inc(&node->refcount);
338 }
339 }
340
341 static void up_read_ref_node(struct fs_node *node)
342 {
343 refcount_dec(&node->refcount);
344 up_read(&node->lock);
345 }
346
347 static void up_write_ref_node(struct fs_node *node, bool locked)
348 {
349 refcount_dec(&node->refcount);
350 if (!locked)
351 up_write(&node->lock);
352 }
353
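/* Drop a reference on the node. On the last reference, tear down the HW
 * object, unlink the node from its parent, free the SW object and then
 * release the reference held on the parent.
 */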
354 static void tree_put_node(struct fs_node *node, bool locked)
355 {
356 struct fs_node *parent_node = node->parent;
357
358 if (refcount_dec_and_test(&node->refcount)) {
359 if (node->del_hw_func)
360 node->del_hw_func(node);
361 if (parent_node) {
362 down_write_ref_node(parent_node, locked);
363 list_del_init(&node->list);
364 }
365 node->del_sw_func(node);
366 if (parent_node)
367 up_write_ref_node(parent_node, locked);
368 node = NULL;
369 }
370 if (!node && parent_node)
371 tree_put_node(parent_node, locked);
372 }
373
374 static int tree_remove_node(struct fs_node *node, bool locked)
375 {
376 if (refcount_read(&node->refcount) > 1) {
377 refcount_dec(&node->refcount);
378 return -EEXIST;
379 }
380 tree_put_node(node, locked);
381 return 0;
382 }
383
384 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
385 unsigned int prio)
386 {
387 struct fs_prio *iter_prio;
388
389 fs_for_each_prio(iter_prio, ns) {
390 if (iter_prio->prio == prio)
391 return iter_prio;
392 }
393
394 return NULL;
395 }
396
397 static bool is_fwd_next_action(u32 action)
398 {
399 return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
400 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
401 }
402
403 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
404 {
405 int i;
406
407 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
408 if (spec->match_value[i] & ~spec->match_criteria[i]) {
409 pr_warn("mlx5_core: match_value differs from match_criteria\n");
410 return false;
411 }
412
413 return true;
414 }
415
416 struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
417 {
418 struct fs_node *root;
419 struct mlx5_flow_namespace *ns;
420
421 root = node->root;
422
423 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
424 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
425 return NULL;
426 }
427
428 ns = container_of(root, struct mlx5_flow_namespace, node);
429 return container_of(ns, struct mlx5_flow_root_namespace, ns);
430 }
431
432 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
433 {
434 struct mlx5_flow_root_namespace *root = find_root(node);
435
436 if (root)
437 return root->dev->priv.steering;
438 return NULL;
439 }
440
441 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
442 {
443 struct mlx5_flow_root_namespace *root = find_root(node);
444
445 if (root)
446 return root->dev;
447 return NULL;
448 }
449
450 static void del_sw_ns(struct fs_node *node)
451 {
452 kfree(node);
453 }
454
455 static void del_sw_prio(struct fs_node *node)
456 {
457 kfree(node);
458 }
459
460 static void del_hw_flow_table(struct fs_node *node)
461 {
462 struct mlx5_flow_root_namespace *root;
463 struct mlx5_flow_table *ft;
464 struct mlx5_core_dev *dev;
465 int err;
466
467 fs_get_obj(ft, node);
468 dev = get_dev(&ft->node);
469 root = find_root(&ft->node);
470 trace_mlx5_fs_del_ft(ft);
471
472 if (node->active) {
473 err = root->cmds->destroy_flow_table(root, ft);
474 if (err)
475 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
476 }
477 }
478
479 static void del_sw_flow_table(struct fs_node *node)
480 {
481 struct mlx5_flow_table *ft;
482 struct fs_prio *prio;
483
484 fs_get_obj(ft, node);
485
486 rhltable_destroy(&ft->fgs_hash);
487 if (ft->node.parent) {
488 fs_get_obj(prio, ft->node.parent);
489 prio->num_ft--;
490 }
491 kfree(ft);
492 }
493
494 static void modify_fte(struct fs_fte *fte)
495 {
496 struct mlx5_flow_root_namespace *root;
497 struct mlx5_flow_table *ft;
498 struct mlx5_flow_group *fg;
499 struct mlx5_core_dev *dev;
500 int err;
501
502 fs_get_obj(fg, fte->node.parent);
503 fs_get_obj(ft, fg->node.parent);
504 dev = get_dev(&fte->node);
505
506 root = find_root(&ft->node);
507 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
508 if (err)
509 mlx5_core_warn(dev,
510 "%s can't del rule fg id=%d fte_index=%d\n",
511 __func__, fg->id, fte->index);
512 fte->modify_mask = 0;
513 }
514
515 static void del_sw_hw_rule(struct fs_node *node)
516 {
517 struct mlx5_flow_rule *rule;
518 struct fs_fte *fte;
519
520 fs_get_obj(rule, node);
521 fs_get_obj(fte, rule->node.parent);
522 trace_mlx5_fs_del_rule(rule);
523 if (is_fwd_next_action(rule->sw_action)) {
524 mutex_lock(&rule->dest_attr.ft->lock);
525 list_del(&rule->next_ft);
526 mutex_unlock(&rule->dest_attr.ft->lock);
527 }
528
529 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
530 --fte->dests_size) {
531 fte->modify_mask |=
532 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
533 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
534 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
535 goto out;
536 }
537
538 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
539 --fte->dests_size) {
540 fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
541 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
542 goto out;
543 }
544
545 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
546 --fte->dests_size) {
547 fte->modify_mask |=
548 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
549 }
550 out:
551 kfree(rule);
552 }
553
554 static void del_hw_fte(struct fs_node *node)
555 {
556 struct mlx5_flow_root_namespace *root;
557 struct mlx5_flow_table *ft;
558 struct mlx5_flow_group *fg;
559 struct mlx5_core_dev *dev;
560 struct fs_fte *fte;
561 int err;
562
563 fs_get_obj(fte, node);
564 fs_get_obj(fg, fte->node.parent);
565 fs_get_obj(ft, fg->node.parent);
566
567 trace_mlx5_fs_del_fte(fte);
568 dev = get_dev(&ft->node);
569 root = find_root(&ft->node);
570 if (node->active) {
571 err = root->cmds->delete_fte(root, ft, fte);
572 if (err)
573 mlx5_core_warn(dev,
574 "flow steering can't delete fte in index %d of flow group id %d\n",
575 fte->index, fg->id);
576 node->active = false;
577 }
578 }
579
580 static void del_sw_fte(struct fs_node *node)
581 {
582 struct mlx5_flow_steering *steering = get_steering(node);
583 struct mlx5_flow_group *fg;
584 struct fs_fte *fte;
585 int err;
586
587 fs_get_obj(fte, node);
588 fs_get_obj(fg, fte->node.parent);
589
590 err = rhashtable_remove_fast(&fg->ftes_hash,
591 &fte->hash,
592 rhash_fte);
593 WARN_ON(err);
594 ida_free(&fg->fte_allocator, fte->index - fg->start_index);
595 kmem_cache_free(steering->ftes_cache, fte);
596 }
597
598 static void del_hw_flow_group(struct fs_node *node)
599 {
600 struct mlx5_flow_root_namespace *root;
601 struct mlx5_flow_group *fg;
602 struct mlx5_flow_table *ft;
603 struct mlx5_core_dev *dev;
604
605 fs_get_obj(fg, node);
606 fs_get_obj(ft, fg->node.parent);
607 dev = get_dev(&ft->node);
608 trace_mlx5_fs_del_fg(fg);
609
610 root = find_root(&ft->node);
611 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
612 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
613 fg->id, ft->id);
614 }
615
616 static void del_sw_flow_group(struct fs_node *node)
617 {
618 struct mlx5_flow_steering *steering = get_steering(node);
619 struct mlx5_flow_group *fg;
620 struct mlx5_flow_table *ft;
621 int err;
622
623 fs_get_obj(fg, node);
624 fs_get_obj(ft, fg->node.parent);
625
626 rhashtable_destroy(&fg->ftes_hash);
627 ida_destroy(&fg->fte_allocator);
628 if (ft->autogroup.active &&
629 fg->max_ftes == ft->autogroup.group_size &&
630 fg->start_index < ft->autogroup.max_fte)
631 ft->autogroup.num_groups--;
632 err = rhltable_remove(&ft->fgs_hash,
633 &fg->hash,
634 rhash_fg);
635 WARN_ON(err);
636 kmem_cache_free(steering->fgs_cache, fg);
637 }
638
639 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
640 {
641 int index;
642 int ret;
643
644 index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
645 if (index < 0)
646 return index;
647
648 fte->index = index + fg->start_index;
649 ret = rhashtable_insert_fast(&fg->ftes_hash,
650 &fte->hash,
651 rhash_fte);
652 if (ret)
653 goto err_ida_remove;
654
655 tree_add_node(&fte->node, &fg->node);
656 list_add_tail(&fte->node.list, &fg->node.children);
657 return 0;
658
659 err_ida_remove:
660 ida_free(&fg->fte_allocator, index);
661 return ret;
662 }
663
664 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
665 const struct mlx5_flow_spec *spec,
666 struct mlx5_flow_act *flow_act)
667 {
668 struct mlx5_flow_steering *steering = get_steering(&ft->node);
669 struct fs_fte *fte;
670
671 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
672 if (!fte)
673 return ERR_PTR(-ENOMEM);
674
675 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
676 fte->node.type = FS_TYPE_FLOW_ENTRY;
677 fte->action = *flow_act;
678 fte->flow_context = spec->flow_context;
679
680 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
681
682 return fte;
683 }
684
685 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
686 struct mlx5_flow_group *fg)
687 {
688 rhashtable_destroy(&fg->ftes_hash);
689 kmem_cache_free(steering->fgs_cache, fg);
690 }
691
692 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
693 u8 match_criteria_enable,
694 const void *match_criteria,
695 int start_index,
696 int end_index)
697 {
698 struct mlx5_flow_group *fg;
699 int ret;
700
701 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
702 if (!fg)
703 return ERR_PTR(-ENOMEM);
704
705 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
706 if (ret) {
707 kmem_cache_free(steering->fgs_cache, fg);
708 return ERR_PTR(ret);
709 }
710
711 ida_init(&fg->fte_allocator);
712 fg->mask.match_criteria_enable = match_criteria_enable;
713 memcpy(&fg->mask.match_criteria, match_criteria,
714 sizeof(fg->mask.match_criteria));
715 fg->node.type = FS_TYPE_FLOW_GROUP;
716 fg->start_index = start_index;
717 fg->max_ftes = end_index - start_index + 1;
718
719 return fg;
720 }
721
722 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
723 u8 match_criteria_enable,
724 const void *match_criteria,
725 int start_index,
726 int end_index,
727 struct list_head *prev)
728 {
729 struct mlx5_flow_steering *steering = get_steering(&ft->node);
730 struct mlx5_flow_group *fg;
731 int ret;
732
733 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
734 start_index, end_index);
735 if (IS_ERR(fg))
736 return fg;
737
738 /* initialize refcnt, add to parent list */
739 ret = rhltable_insert(&ft->fgs_hash,
740 &fg->hash,
741 rhash_fg);
742 if (ret) {
743 dealloc_flow_group(steering, fg);
744 return ERR_PTR(ret);
745 }
746
747 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
748 tree_add_node(&fg->node, &ft->node);
749 /* Add node to group list */
750 list_add(&fg->node.list, prev);
751 atomic_inc(&ft->node.version);
752
753 return fg;
754 }
755
756 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
757 enum fs_flow_table_type table_type,
758 enum fs_flow_table_op_mod op_mod,
759 u32 flags)
760 {
761 struct mlx5_flow_table *ft;
762 int ret;
763
764 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
765 if (!ft)
766 return ERR_PTR(-ENOMEM);
767
768 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
769 if (ret) {
770 kfree(ft);
771 return ERR_PTR(ret);
772 }
773
774 ft->level = level;
775 ft->node.type = FS_TYPE_FLOW_TABLE;
776 ft->op_mod = op_mod;
777 ft->type = table_type;
778 ft->vport = vport;
779 ft->flags = flags;
780 INIT_LIST_HEAD(&ft->fwd_rules);
781 mutex_init(&ft->lock);
782
783 return ft;
784 }
785
786 /* If reverse is false, we search for the first flow table in the root
787  * sub-tree starting from start (closest from the right); else we search
788  * for the last flow table in the root sub-tree up to start (closest from the left).
789  */
790 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
791 struct list_head *start,
792 bool reverse)
793 {
794 #define list_advance_entry(pos, reverse) \
795 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
796
797 #define list_for_each_advance_continue(pos, head, reverse) \
798 for (pos = list_advance_entry(pos, reverse); \
799 &pos->list != (head); \
800 pos = list_advance_entry(pos, reverse))
801
802 struct fs_node *iter = list_entry(start, struct fs_node, list);
803 struct mlx5_flow_table *ft = NULL;
804
805 if (!root)
806 return NULL;
807
808 list_for_each_advance_continue(iter, &root->children, reverse) {
809 if (iter->type == FS_TYPE_FLOW_TABLE) {
810 fs_get_obj(ft, iter);
811 return ft;
812 }
813 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
814 if (ft)
815 return ft;
816 }
817
818 return ft;
819 }
820
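/* Walk up the tree to the closest FS_TYPE_PRIO_CHAINS ancestor, if any.
 * If @child is non-NULL, it is set to the direct child of that ancestor
 * on the path that was walked.
 */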
821 static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
822 struct fs_node **child)
823 {
824 struct fs_node *node = NULL;
825
826 while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
827 node = parent;
828 parent = parent->parent;
829 }
830
831 if (child)
832 *child = node;
833
834 return parent;
835 }
836
837 /* If reverse is false then return the first flow table next to the passed node
838 * in the tree, else return the last flow table before the node in the tree.
839 * If skip is true, skip the flow tables in the same prio_chains prio.
840 */
841 static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
842 bool skip)
843 {
844 struct fs_node *prio_chains_parent = NULL;
845 struct mlx5_flow_table *ft = NULL;
846 struct fs_node *curr_node;
847 struct fs_node *parent;
848
849 if (skip)
850 prio_chains_parent = find_prio_chains_parent(node, NULL);
851 parent = node->parent;
852 curr_node = node;
853 while (!ft && parent) {
854 if (parent != prio_chains_parent)
855 ft = find_closest_ft_recursive(parent, &curr_node->list,
856 reverse);
857 curr_node = parent;
858 parent = curr_node->parent;
859 }
860 return ft;
861 }
862
863 /* Assuming the whole tree is locked by the chain lock mutex */
864 static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
865 {
866 return find_closest_ft(node, false, true);
867 }
868
869 /* Assuming the whole tree is locked by the chain lock mutex */
870 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
871 {
872 return find_closest_ft(node, true, true);
873 }
874
875 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
876 struct mlx5_flow_act *flow_act)
877 {
878 struct fs_prio *prio;
879 bool next_ns;
880
881 next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
882 fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
883
884 return find_next_chained_ft(&prio->node);
885 }
886
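/* Chain every flow table in @prio to @ft as its next flow table. */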
887 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
888 struct fs_prio *prio,
889 struct mlx5_flow_table *ft)
890 {
891 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
892 struct mlx5_flow_table *iter;
893 int err;
894
895 fs_for_each_ft(iter, prio) {
896 err = root->cmds->modify_flow_table(root, iter, ft);
897 if (err) {
898 mlx5_core_err(dev,
899 "Failed to modify flow table id %d, type %d, err %d\n",
900 iter->id, iter->type, err);
901 /* The driver is out of sync with the FW */
902 return err;
903 }
904 }
905 return 0;
906 }
907
908 static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
909 struct fs_node *parent,
910 struct fs_node **child,
911 bool reverse)
912 {
913 struct mlx5_flow_table *ft;
914
915 ft = find_closest_ft(node, reverse, false);
916
917 if (ft && parent == find_prio_chains_parent(&ft->node, child))
918 return ft;
919
920 return NULL;
921 }
922
923 /* Connect flow tables from previous priority of prio to ft */
924 static int connect_prev_fts(struct mlx5_core_dev *dev,
925 struct mlx5_flow_table *ft,
926 struct fs_prio *prio)
927 {
928 struct fs_node *prio_parent, *parent = NULL, *child, *node;
929 struct mlx5_flow_table *prev_ft;
930 int err = 0;
931
932 prio_parent = find_prio_chains_parent(&prio->node, &child);
933
934 /* return directly if not under the first sub ns of prio_chains prio */
935 if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
936 return 0;
937
938 prev_ft = find_prev_chained_ft(&prio->node);
939 while (prev_ft) {
940 struct fs_prio *prev_prio;
941
942 fs_get_obj(prev_prio, prev_ft->node.parent);
943 err = connect_fts_in_prio(dev, prev_prio, ft);
944 if (err)
945 break;
946
947 if (!parent) {
948 parent = find_prio_chains_parent(&prev_prio->node, &child);
949 if (!parent)
950 break;
951 }
952
953 node = child;
954 prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
955 }
956 return err;
957 }
958
959 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
960 *prio)
961 {
962 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
963 struct mlx5_ft_underlay_qp *uqp;
964 int min_level = INT_MAX;
965 int err = 0;
966 u32 qpn;
967
968 if (root->root_ft)
969 min_level = root->root_ft->level;
970
971 if (ft->level >= min_level)
972 return 0;
973
974 if (list_empty(&root->underlay_qpns)) {
975 /* Don't set any QPN (zero) in case QPN list is empty */
976 qpn = 0;
977 err = root->cmds->update_root_ft(root, ft, qpn, false);
978 } else {
979 list_for_each_entry(uqp, &root->underlay_qpns, list) {
980 qpn = uqp->qpn;
981 err = root->cmds->update_root_ft(root, ft,
982 qpn, false);
983 if (err)
984 break;
985 }
986 }
987
988 if (err)
989 mlx5_core_warn(root->dev,
990 "Update root flow table of id(%u) qpn(%d) failed\n",
991 ft->id, qpn);
992 else
993 root->root_ft = ft;
994
995 return err;
996 }
997
998 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
999 struct mlx5_flow_destination *dest)
1000 {
1001 struct mlx5_flow_root_namespace *root;
1002 struct mlx5_flow_table *ft;
1003 struct mlx5_flow_group *fg;
1004 struct fs_fte *fte;
1005 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1006 int err = 0;
1007
1008 fs_get_obj(fte, rule->node.parent);
1009 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1010 return -EINVAL;
1011 down_write_ref_node(&fte->node, false);
1012 fs_get_obj(fg, fte->node.parent);
1013 fs_get_obj(ft, fg->node.parent);
1014
1015 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1016 root = find_root(&ft->node);
1017 err = root->cmds->update_fte(root, ft, fg,
1018 modify_mask, fte);
1019 up_write_ref_node(&fte->node, false);
1020
1021 return err;
1022 }
1023
1024 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1025 struct mlx5_flow_destination *new_dest,
1026 struct mlx5_flow_destination *old_dest)
1027 {
1028 int i;
1029
1030 if (!old_dest) {
1031 if (handle->num_rules != 1)
1032 return -EINVAL;
1033 return _mlx5_modify_rule_destination(handle->rule[0],
1034 new_dest);
1035 }
1036
1037 for (i = 0; i < handle->num_rules; i++) {
1038 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
1039 return _mlx5_modify_rule_destination(handle->rule[i],
1040 new_dest);
1041 }
1042
1043 return -EINVAL;
1044 }
1045
1046 /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
1047 static int connect_fwd_rules(struct mlx5_core_dev *dev,
1048 struct mlx5_flow_table *new_next_ft,
1049 struct mlx5_flow_table *old_next_ft)
1050 {
1051 struct mlx5_flow_destination dest = {};
1052 struct mlx5_flow_rule *iter;
1053 int err = 0;
1054
1055 /* new_next_ft and old_next_ft could be NULL only
1056 * when we create/destroy the anchor flow table.
1057 */
1058 if (!new_next_ft || !old_next_ft)
1059 return 0;
1060
1061 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1062 dest.ft = new_next_ft;
1063
1064 mutex_lock(&old_next_ft->lock);
1065 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1066 mutex_unlock(&old_next_ft->lock);
1067 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1068 if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1069 iter->ft->ns == new_next_ft->ns)
1070 continue;
1071
1072 err = _mlx5_modify_rule_destination(iter, &dest);
1073 if (err)
1074 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1075 new_next_ft->id);
1076 }
1077 return 0;
1078 }
1079
1080 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1081 struct fs_prio *prio)
1082 {
1083 struct mlx5_flow_table *next_ft, *first_ft;
1084 int err = 0;
1085
1086 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1087
1088 first_ft = list_first_entry_or_null(&prio->node.children,
1089 struct mlx5_flow_table, node.list);
1090 if (!first_ft || first_ft->level > ft->level) {
1091 err = connect_prev_fts(dev, ft, prio);
1092 if (err)
1093 return err;
1094
1095 next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
1096 err = connect_fwd_rules(dev, ft, next_ft);
1097 if (err)
1098 return err;
1099 }
1100
1101 if (MLX5_CAP_FLOWTABLE(dev,
1102 flow_table_properties_nic_receive.modify_root))
1103 err = update_root_ft_create(ft, prio);
1104 return err;
1105 }
1106
1107 static void list_add_flow_table(struct mlx5_flow_table *ft,
1108 struct fs_prio *prio)
1109 {
1110 struct list_head *prev = &prio->node.children;
1111 struct mlx5_flow_table *iter;
1112
1113 fs_for_each_ft(iter, prio) {
1114 if (iter->level > ft->level)
1115 break;
1116 prev = &iter->node.list;
1117 }
1118 list_add(&ft->node.list, prev);
1119 }
1120
1121 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1122 struct mlx5_flow_table_attr *ft_attr,
1123 enum fs_flow_table_op_mod op_mod,
1124 u16 vport)
1125 {
1126 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1127 bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1128 struct mlx5_flow_table *next_ft;
1129 struct fs_prio *fs_prio = NULL;
1130 struct mlx5_flow_table *ft;
1131 int err;
1132
1133 if (!root) {
1134 pr_err("mlx5: flow steering failed to find root of namespace\n");
1135 return ERR_PTR(-ENODEV);
1136 }
1137
1138 mutex_lock(&root->chain_lock);
1139 fs_prio = find_prio(ns, ft_attr->prio);
1140 if (!fs_prio) {
1141 err = -EINVAL;
1142 goto unlock_root;
1143 }
1144 if (!unmanaged) {
1145 /* The level is related to the
1146 * priority level range.
1147 */
1148 if (ft_attr->level >= fs_prio->num_levels) {
1149 err = -ENOSPC;
1150 goto unlock_root;
1151 }
1152
1153 ft_attr->level += fs_prio->start_level;
1154 }
1155
1159 ft = alloc_flow_table(ft_attr->level,
1160 vport,
1161 root->table_type,
1162 op_mod, ft_attr->flags);
1163 if (IS_ERR(ft)) {
1164 err = PTR_ERR(ft);
1165 goto unlock_root;
1166 }
1167
1168 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1169 next_ft = unmanaged ? ft_attr->next_ft :
1170 find_next_chained_ft(&fs_prio->node);
1171 ft->def_miss_action = ns->def_miss_action;
1172 ft->ns = ns;
1173 err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
1174 if (err)
1175 goto free_ft;
1176
1177 if (!unmanaged) {
1178 err = connect_flow_table(root->dev, ft, fs_prio);
1179 if (err)
1180 goto destroy_ft;
1181 }
1182
1183 ft->node.active = true;
1184 down_write_ref_node(&fs_prio->node, false);
1185 if (!unmanaged) {
1186 tree_add_node(&ft->node, &fs_prio->node);
1187 list_add_flow_table(ft, fs_prio);
1188 } else {
1189 ft->node.root = fs_prio->node.root;
1190 }
1191 fs_prio->num_ft++;
1192 up_write_ref_node(&fs_prio->node, false);
1193 mutex_unlock(&root->chain_lock);
1194 trace_mlx5_fs_add_ft(ft);
1195 return ft;
1196 destroy_ft:
1197 root->cmds->destroy_flow_table(root, ft);
1198 free_ft:
1199 rhltable_destroy(&ft->fgs_hash);
1200 kfree(ft);
1201 unlock_root:
1202 mutex_unlock(&root->chain_lock);
1203 return ERR_PTR(err);
1204 }
1205
1206 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1207 struct mlx5_flow_table_attr *ft_attr)
1208 {
1209 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1210 }
1211 EXPORT_SYMBOL(mlx5_create_flow_table);
1212
1213 struct mlx5_flow_table *
1214 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1215 struct mlx5_flow_table_attr *ft_attr, u16 vport)
1216 {
1217 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1218 }
1219
1220 struct mlx5_flow_table*
1221 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1222 int prio, u32 level)
1223 {
1224 struct mlx5_flow_table_attr ft_attr = {};
1225
1226 ft_attr.level = level;
1227 ft_attr.prio = prio;
1228 ft_attr.max_fte = 1;
1229
1230 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1231 }
1232 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1233
1234 #define MAX_FLOW_GROUP_SIZE BIT(24)
1235 struct mlx5_flow_table*
1236 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1237 struct mlx5_flow_table_attr *ft_attr)
1238 {
1239 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1240 int max_num_groups = ft_attr->autogroup.max_num_groups;
1241 struct mlx5_flow_table *ft;
1242 int autogroups_max_fte;
1243
1244 ft = mlx5_create_flow_table(ns, ft_attr);
1245 if (IS_ERR(ft))
1246 return ft;
1247
1248 autogroups_max_fte = ft->max_fte - num_reserved_entries;
1249 if (max_num_groups > autogroups_max_fte)
1250 goto err_validate;
1251 if (num_reserved_entries > ft->max_fte)
1252 goto err_validate;
1253
1254 /* Align the number of groups according to the largest group size */
1255 if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1256 max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1257
1258 ft->autogroup.active = true;
1259 ft->autogroup.required_groups = max_num_groups;
1260 ft->autogroup.max_fte = autogroups_max_fte;
1261 /* We reserve space for flow groups in addition to max types */
1262 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1263
1264 return ft;
1265
1266 err_validate:
1267 mlx5_destroy_flow_table(ft);
1268 return ERR_PTR(-ENOSPC);
1269 }
1270 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1271
1272 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1273 u32 *fg_in)
1274 {
1275 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1276 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1277 fg_in, match_criteria);
1278 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1279 fg_in,
1280 match_criteria_enable);
1281 int start_index = MLX5_GET(create_flow_group_in, fg_in,
1282 start_flow_index);
1283 int end_index = MLX5_GET(create_flow_group_in, fg_in,
1284 end_flow_index);
1285 struct mlx5_flow_group *fg;
1286 int err;
1287
1288 if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1289 return ERR_PTR(-EPERM);
1290
1291 down_write_ref_node(&ft->node, false);
1292 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1293 start_index, end_index,
1294 ft->node.children.prev);
1295 up_write_ref_node(&ft->node, false);
1296 if (IS_ERR(fg))
1297 return fg;
1298
1299 err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1300 if (err) {
1301 tree_put_node(&fg->node, false);
1302 return ERR_PTR(err);
1303 }
1304 trace_mlx5_fs_add_fg(fg);
1305 fg->node.active = true;
1306
1307 return fg;
1308 }
1309 EXPORT_SYMBOL(mlx5_create_flow_group);
1310
1311 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1312 {
1313 struct mlx5_flow_rule *rule;
1314
1315 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1316 if (!rule)
1317 return NULL;
1318
1319 INIT_LIST_HEAD(&rule->next_ft);
1320 rule->node.type = FS_TYPE_FLOW_DEST;
1321 if (dest)
1322 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1323
1324 return rule;
1325 }
1326
1327 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1328 {
1329 struct mlx5_flow_handle *handle;
1330
1331 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1332 if (!handle)
1333 return NULL;
1334
1335 handle->num_rules = num_rules;
1336
1337 return handle;
1338 }
1339
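/* Undo a partially built handle: drop a reference on its first @i rules,
 * freeing the ones whose refcount drops to zero, then free the handle.
 */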
1340 static void destroy_flow_handle(struct fs_fte *fte,
1341 struct mlx5_flow_handle *handle,
1342 struct mlx5_flow_destination *dest,
1343 int i)
1344 {
1345 for (; --i >= 0;) {
1346 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1347 fte->dests_size--;
1348 list_del(&handle->rule[i]->node.list);
1349 kfree(handle->rule[i]);
1350 }
1351 }
1352 kfree(handle);
1353 }
1354
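/* Build a handle holding one rule per destination. Existing rules on the
 * FTE with a matching destination are reused (refcount taken); otherwise
 * a new rule is allocated, added to the FTE's children and reflected in
 * *modify_mask and *new_rule.
 */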
1355 static struct mlx5_flow_handle *
1356 create_flow_handle(struct fs_fte *fte,
1357 struct mlx5_flow_destination *dest,
1358 int dest_num,
1359 int *modify_mask,
1360 bool *new_rule)
1361 {
1362 struct mlx5_flow_handle *handle;
1363 struct mlx5_flow_rule *rule = NULL;
1364 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1365 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1366 int type;
1367 int i = 0;
1368
1369 handle = alloc_handle((dest_num) ? dest_num : 1);
1370 if (!handle)
1371 return ERR_PTR(-ENOMEM);
1372
1373 do {
1374 if (dest) {
1375 rule = find_flow_rule(fte, dest + i);
1376 if (rule) {
1377 refcount_inc(&rule->node.refcount);
1378 goto rule_found;
1379 }
1380 }
1381
1382 *new_rule = true;
1383 rule = alloc_rule(dest + i);
1384 if (!rule)
1385 goto free_rules;
1386
1387 /* Add dest to the dests list - flow tables need to be at the
1388  * end of the list so that forward-to-next-prio rules come last.
1389  */
1390 tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1391 if (dest &&
1392 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1393 list_add(&rule->node.list, &fte->node.children);
1394 else
1395 list_add_tail(&rule->node.list, &fte->node.children);
1396 if (dest) {
1397 fte->dests_size++;
1398
1399 type = dest[i].type ==
1400 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1401 *modify_mask |= type ? count : dst;
1402 }
1403 rule_found:
1404 handle->rule[i] = rule;
1405 } while (++i < dest_num);
1406
1407 return handle;
1408
1409 free_rules:
1410 destroy_flow_handle(fte, handle, dest, i);
1411 return ERR_PTR(-ENOMEM);
1412 }
1413
1414 /* fte should not be deleted while calling this function */
1415 static struct mlx5_flow_handle *
1416 add_rule_fte(struct fs_fte *fte,
1417 struct mlx5_flow_group *fg,
1418 struct mlx5_flow_destination *dest,
1419 int dest_num,
1420 bool update_action)
1421 {
1422 struct mlx5_flow_root_namespace *root;
1423 struct mlx5_flow_handle *handle;
1424 struct mlx5_flow_table *ft;
1425 int modify_mask = 0;
1426 int err;
1427 bool new_rule = false;
1428
1429 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1430 &new_rule);
1431 if (IS_ERR(handle) || !new_rule)
1432 goto out;
1433
1434 if (update_action)
1435 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1436
1437 fs_get_obj(ft, fg->node.parent);
1438 root = find_root(&fg->node);
1439 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1440 err = root->cmds->create_fte(root, ft, fg, fte);
1441 else
1442 err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1443 if (err)
1444 goto free_handle;
1445
1446 fte->node.active = true;
1447 fte->status |= FS_FTE_STATUS_EXISTING;
1448 atomic_inc(&fg->node.version);
1449
1450 out:
1451 return handle;
1452
1453 free_handle:
1454 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1455 return ERR_PTR(err);
1456 }
1457
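/* Find a free FTE index range in an autogrouped table and insert a new
 * flow group covering it, keeping the group list sorted by start_index.
 */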
1458 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1459 const struct mlx5_flow_spec *spec)
1460 {
1461 struct list_head *prev = &ft->node.children;
1462 u32 max_fte = ft->autogroup.max_fte;
1463 unsigned int candidate_index = 0;
1464 unsigned int group_size = 0;
1465 struct mlx5_flow_group *fg;
1466
1467 if (!ft->autogroup.active)
1468 return ERR_PTR(-ENOENT);
1469
1470 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1471 group_size = ft->autogroup.group_size;
1472
1473 /* max_fte == ft->autogroup.max_types */
1474 if (group_size == 0)
1475 group_size = 1;
1476
1477 /* sorted by start_index */
1478 fs_for_each_fg(fg, ft) {
1479 if (candidate_index + group_size > fg->start_index)
1480 candidate_index = fg->start_index + fg->max_ftes;
1481 else
1482 break;
1483 prev = &fg->node.list;
1484 }
1485
1486 if (candidate_index + group_size > max_fte)
1487 return ERR_PTR(-ENOSPC);
1488
1489 fg = alloc_insert_flow_group(ft,
1490 spec->match_criteria_enable,
1491 spec->match_criteria,
1492 candidate_index,
1493 candidate_index + group_size - 1,
1494 prev);
1495 if (IS_ERR(fg))
1496 goto out;
1497
1498 if (group_size == ft->autogroup.group_size)
1499 ft->autogroup.num_groups++;
1500
1501 out:
1502 return fg;
1503 }
1504
1505 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1506 struct mlx5_flow_group *fg)
1507 {
1508 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1509 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1510 void *match_criteria_addr;
1511 u8 src_esw_owner_mask_on;
1512 void *misc;
1513 int err;
1514 u32 *in;
1515
1516 in = kvzalloc(inlen, GFP_KERNEL);
1517 if (!in)
1518 return -ENOMEM;
1519
1520 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1521 fg->mask.match_criteria_enable);
1522 MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1523 MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
1524 fg->max_ftes - 1);
1525
1526 misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1527 misc_parameters);
1528 src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1529 source_eswitch_owner_vhca_id);
1530 MLX5_SET(create_flow_group_in, in,
1531 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1532
1533 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1534 in, match_criteria);
1535 memcpy(match_criteria_addr, fg->mask.match_criteria,
1536 sizeof(fg->mask.match_criteria));
1537
1538 err = root->cmds->create_flow_group(root, ft, in, fg);
1539 if (!err) {
1540 fg->node.active = true;
1541 trace_mlx5_fs_add_fg(fg);
1542 }
1543
1544 kvfree(in);
1545 return err;
1546 }
1547
1548 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1549 struct mlx5_flow_destination *d2)
1550 {
1551 if (d1->type == d2->type) {
1552 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1553 d1->vport.num == d2->vport.num &&
1554 d1->vport.flags == d2->vport.flags &&
1555 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1556 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1557 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1558 (d1->vport.pkt_reformat->id ==
1559 d2->vport.pkt_reformat->id) : true)) ||
1560 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1561 d1->ft == d2->ft) ||
1562 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1563 d1->tir_num == d2->tir_num) ||
1564 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1565 d1->ft_num == d2->ft_num) ||
1566 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1567 d1->sampler_id == d2->sampler_id))
1568 return true;
1569 }
1570
1571 return false;
1572 }
1573
1574 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1575 struct mlx5_flow_destination *dest)
1576 {
1577 struct mlx5_flow_rule *rule;
1578
1579 list_for_each_entry(rule, &fte->node.children, node.list) {
1580 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1581 return rule;
1582 }
1583 return NULL;
1584 }
1585
1586 static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1587 const struct mlx5_fs_vlan *vlan1)
1588 {
1589 return vlan0->ethtype != vlan1->ethtype ||
1590 vlan0->vid != vlan1->vid ||
1591 vlan0->prio != vlan1->prio;
1592 }
1593
1594 static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1595 const struct mlx5_flow_act *act2)
1596 {
1597 u32 action1 = act1->action;
1598 u32 action2 = act2->action;
1599 u32 xored_actions;
1600
1601 xored_actions = action1 ^ action2;
1602
1603 /* if one rule only wants to count, it's ok */
1604 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1605 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1606 return false;
1607
1608 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1609 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1610 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1611 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1612 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1613 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1614 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1615 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1616 return true;
1617
1618 if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1619 act1->pkt_reformat != act2->pkt_reformat)
1620 return true;
1621
1622 if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1623 act1->modify_hdr != act2->modify_hdr)
1624 return true;
1625
1626 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1627 check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1628 return true;
1629
1630 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1631 check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1632 return true;
1633
1634 return false;
1635 }
1636
1637 static int check_conflicting_ftes(struct fs_fte *fte,
1638 const struct mlx5_flow_context *flow_context,
1639 const struct mlx5_flow_act *flow_act)
1640 {
1641 if (check_conflicting_actions(flow_act, &fte->action)) {
1642 mlx5_core_warn(get_dev(&fte->node),
1643 "Found two FTEs with conflicting actions\n");
1644 return -EEXIST;
1645 }
1646
1647 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1648 fte->flow_context.flow_tag != flow_context->flow_tag) {
1649 mlx5_core_warn(get_dev(&fte->node),
1650 "FTE flow tag %u already exists with different flow tag %u\n",
1651 fte->flow_context.flow_tag,
1652 flow_context->flow_tag);
1653 return -EEXIST;
1654 }
1655
1656 return 0;
1657 }
1658
1659 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1660 const struct mlx5_flow_spec *spec,
1661 struct mlx5_flow_act *flow_act,
1662 struct mlx5_flow_destination *dest,
1663 int dest_num,
1664 struct fs_fte *fte)
1665 {
1666 struct mlx5_flow_handle *handle;
1667 int old_action;
1668 int i;
1669 int ret;
1670
1671 ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1672 if (ret)
1673 return ERR_PTR(ret);
1674
1675 old_action = fte->action.action;
1676 fte->action.action |= flow_act->action;
1677 handle = add_rule_fte(fte, fg, dest, dest_num,
1678 old_action != flow_act->action);
1679 if (IS_ERR(handle)) {
1680 fte->action.action = old_action;
1681 return handle;
1682 }
1683 trace_mlx5_fs_set_fte(fte, false);
1684
1685 for (i = 0; i < handle->num_rules; i++) {
1686 if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1687 tree_add_node(&handle->rule[i]->node, &fte->node);
1688 trace_mlx5_fs_add_rule(handle->rule[i]);
1689 }
1690 }
1691 return handle;
1692 }
1693
1694 static bool counter_is_valid(u32 action)
1695 {
1696 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1697 MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1698 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1699 }
1700
1701 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1702 struct mlx5_flow_act *flow_act,
1703 struct mlx5_flow_table *ft)
1704 {
1705 bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1706 u32 action = flow_act->action;
1707
1708 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1709 return counter_is_valid(action);
1710
1711 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1712 return true;
1713
1714 if (ignore_level) {
1715 if (ft->type != FS_FT_FDB &&
1716 ft->type != FS_FT_NIC_RX)
1717 return false;
1718
1719 if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1720 ft->type != dest->ft->type)
1721 return false;
1722 }
1723
1724 if (!dest || ((dest->type ==
1725 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1726 (dest->ft->level <= ft->level && !ignore_level)))
1727 return false;
1728 return true;
1729 }
1730
1731 struct match_list {
1732 struct list_head list;
1733 struct mlx5_flow_group *g;
1734 };
1735
1736 static void free_match_list(struct match_list *head, bool ft_locked)
1737 {
1738 struct match_list *iter, *match_tmp;
1739
1740 list_for_each_entry_safe(iter, match_tmp, &head->list,
1741 list) {
1742 tree_put_node(&iter->g->node, ft_locked);
1743 list_del(&iter->list);
1744 kfree(iter);
1745 }
1746 }
1747
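/* Collect all flow groups in @ft whose match criteria equal @spec's into
 * @match_head, taking a reference on each group.
 */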
1748 static int build_match_list(struct match_list *match_head,
1749 struct mlx5_flow_table *ft,
1750 const struct mlx5_flow_spec *spec,
1751 bool ft_locked)
1752 {
1753 struct rhlist_head *tmp, *list;
1754 struct mlx5_flow_group *g;
1755 int err = 0;
1756
1757 rcu_read_lock();
1758 INIT_LIST_HEAD(&match_head->list);
1759 	/* Collect all fgs which have a matching match_criteria */
1760 list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1761 /* RCU is atomic, we can't execute FW commands here */
1762 rhl_for_each_entry_rcu(g, tmp, list, hash) {
1763 struct match_list *curr_match;
1764
1765 if (unlikely(!tree_get_node(&g->node)))
1766 continue;
1767
1768 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1769 if (!curr_match) {
1770 rcu_read_unlock();
1771 free_match_list(match_head, ft_locked);
1772 return -ENOMEM;
1773 }
1774 curr_match->g = g;
1775 list_add_tail(&curr_match->list, &match_head->list);
1776 }
1777 rcu_read_unlock();
1778 return err;
1779 }
1780
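/*
 * Sum the version counters of all matched flow groups. The caller
 * samples this sum before searching the groups for an existing FTE and
 * compares it again afterwards; a change means an FTE may have been
 * added concurrently and the search must be retried.
 */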
1781 static u64 matched_fgs_get_version(struct list_head *match_head)
1782 {
1783 struct match_list *iter;
1784 u64 version = 0;
1785
1786 list_for_each_entry(iter, match_head, list)
1787 version += (u64)atomic_read(&iter->g->node.version);
1788 return version;
1789 }
1790
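/*
 * Look up an FTE by match value inside a single flow group. The group
 * lock (read or write, depending on @take_write) is held only while the
 * FTE is found and locked. On success the FTE is returned with an extra
 * reference and with its node write-locked; the caller must unlock it
 * and drop the reference when done.
 */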
1791 static struct fs_fte *
1792 lookup_fte_locked(struct mlx5_flow_group *g,
1793 const u32 *match_value,
1794 bool take_write)
1795 {
1796 struct fs_fte *fte_tmp;
1797
1798 if (take_write)
1799 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1800 else
1801 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1802 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1803 rhash_fte);
1804 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1805 fte_tmp = NULL;
1806 goto out;
1807 }
1808 if (!fte_tmp->node.active) {
1809 tree_put_node(&fte_tmp->node, false);
1810 fte_tmp = NULL;
1811 goto out;
1812 }
1813
1814 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1815 out:
1816 if (take_write)
1817 up_write_ref_node(&g->node, false);
1818 else
1819 up_read_ref_node(&g->node);
1820 return fte_tmp;
1821 }
1822
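/*
 * Try to attach the new rule to one of the flow groups in @match_head.
 * First search the groups for an FTE with an identical match value and
 * append to it (unless FLOW_ACT_NO_APPEND is set); otherwise re-validate
 * the table and group versions and insert a fresh FTE into the first
 * group that has room. Returns ERR_PTR(-EAGAIN) if the table version
 * changed underneath us, and ERR_PTR(-ENOENT) if no existing group could
 * take the FTE, in which case the caller falls back to creating a new
 * group.
 */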
1823 static struct mlx5_flow_handle *
1824 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1825 struct list_head *match_head,
1826 const struct mlx5_flow_spec *spec,
1827 struct mlx5_flow_act *flow_act,
1828 struct mlx5_flow_destination *dest,
1829 int dest_num,
1830 int ft_version)
1831 {
1832 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1833 struct mlx5_flow_group *g;
1834 struct mlx5_flow_handle *rule;
1835 struct match_list *iter;
1836 bool take_write = false;
1837 struct fs_fte *fte;
1838 u64 version = 0;
1839 int err;
1840
1841 fte = alloc_fte(ft, spec, flow_act);
1842 if (IS_ERR(fte))
1843 return ERR_PTR(-ENOMEM);
1844
1845 search_again_locked:
1846 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1847 goto skip_search;
1848 version = matched_fgs_get_version(match_head);
1849 	/* Try to find an FTE with an identical match value and attempt to
1850 	 * update its action.
1851 	 */
1852 list_for_each_entry(iter, match_head, list) {
1853 struct fs_fte *fte_tmp;
1854
1855 g = iter->g;
1856 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1857 if (!fte_tmp)
1858 continue;
1859 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1860 /* No error check needed here, because insert_fte() is not called */
1861 up_write_ref_node(&fte_tmp->node, false);
1862 tree_put_node(&fte_tmp->node, false);
1863 kmem_cache_free(steering->ftes_cache, fte);
1864 return rule;
1865 }
1866
1867 skip_search:
1868 /* No group with matching fte found, or we skipped the search.
1869 * Try to add a new fte to any matching fg.
1870 */
1871
1872 	/* Check the ft version, for the case that a new flow group
1873 	 * was added while the fgs weren't locked.
1874 	 */
1875 if (atomic_read(&ft->node.version) != ft_version) {
1876 rule = ERR_PTR(-EAGAIN);
1877 goto out;
1878 }
1879
1880 	/* Check the fgs version. If the version has changed, it could be
1881 	 * that an FTE with the same match value was added while the fgs
1882 	 * weren't locked.
1883 	 */
1884 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1885 version != matched_fgs_get_version(match_head)) {
1886 take_write = true;
1887 goto search_again_locked;
1888 }
1889
1890 list_for_each_entry(iter, match_head, list) {
1891 g = iter->g;
1892
1893 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1894
1895 if (!g->node.active) {
1896 up_write_ref_node(&g->node, false);
1897 continue;
1898 }
1899
1900 err = insert_fte(g, fte);
1901 if (err) {
1902 up_write_ref_node(&g->node, false);
1903 if (err == -ENOSPC)
1904 continue;
1905 kmem_cache_free(steering->ftes_cache, fte);
1906 return ERR_PTR(err);
1907 }
1908
1909 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1910 up_write_ref_node(&g->node, false);
1911 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1912 up_write_ref_node(&fte->node, false);
1913 if (IS_ERR(rule))
1914 tree_put_node(&fte->node, false);
1915 return rule;
1916 }
1917 rule = ERR_PTR(-ENOENT);
1918 out:
1919 kmem_cache_free(steering->ftes_cache, fte);
1920 return rule;
1921 }
1922
1923 static struct mlx5_flow_handle *
1924 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1925 const struct mlx5_flow_spec *spec,
1926 struct mlx5_flow_act *flow_act,
1927 struct mlx5_flow_destination *dest,
1928 int dest_num)
1929
1930 {
1931 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1932 struct mlx5_flow_handle *rule;
1933 struct match_list match_head;
1934 struct mlx5_flow_group *g;
1935 bool take_write = false;
1936 struct fs_fte *fte;
1937 int version;
1938 int err;
1939 int i;
1940
1941 if (!check_valid_spec(spec))
1942 return ERR_PTR(-EINVAL);
1943
1944 for (i = 0; i < dest_num; i++) {
1945 if (!dest_is_valid(&dest[i], flow_act, ft))
1946 return ERR_PTR(-EINVAL);
1947 }
1948 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1949 search_again_locked:
1950 version = atomic_read(&ft->node.version);
1951
1952 	/* Collect all fgs which have a matching match_criteria */
1953 err = build_match_list(&match_head, ft, spec, take_write);
1954 if (err) {
1955 if (take_write)
1956 up_write_ref_node(&ft->node, false);
1957 else
1958 up_read_ref_node(&ft->node);
1959 return ERR_PTR(err);
1960 }
1961
1962 if (!take_write)
1963 up_read_ref_node(&ft->node);
1964
1965 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1966 dest_num, version);
1967 free_match_list(&match_head, take_write);
1968 if (!IS_ERR(rule) ||
1969 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1970 if (take_write)
1971 up_write_ref_node(&ft->node, false);
1972 return rule;
1973 }
1974
1975 if (!take_write) {
1976 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1977 take_write = true;
1978 }
1979
1980 if (PTR_ERR(rule) == -EAGAIN ||
1981 version != atomic_read(&ft->node.version))
1982 goto search_again_locked;
1983
1984 g = alloc_auto_flow_group(ft, spec);
1985 if (IS_ERR(g)) {
1986 rule = ERR_CAST(g);
1987 up_write_ref_node(&ft->node, false);
1988 return rule;
1989 }
1990
1991 fte = alloc_fte(ft, spec, flow_act);
1992 if (IS_ERR(fte)) {
1993 up_write_ref_node(&ft->node, false);
1994 err = PTR_ERR(fte);
1995 goto err_alloc_fte;
1996 }
1997
1998 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1999 up_write_ref_node(&ft->node, false);
2000
2001 err = create_auto_flow_group(ft, g);
2002 if (err)
2003 goto err_release_fg;
2004
2005 err = insert_fte(g, fte);
2006 if (err)
2007 goto err_release_fg;
2008
2009 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2010 up_write_ref_node(&g->node, false);
2011 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2012 up_write_ref_node(&fte->node, false);
2013 if (IS_ERR(rule))
2014 tree_put_node(&fte->node, false);
2015 tree_put_node(&g->node, false);
2016 return rule;
2017
2018 err_release_fg:
2019 up_write_ref_node(&g->node, false);
2020 kmem_cache_free(steering->ftes_cache, fte);
2021 err_alloc_fte:
2022 tree_put_node(&g->node, false);
2023 return ERR_PTR(err);
2024 }
2025
2026 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2027 {
2028 return ((ft->type == FS_FT_NIC_RX) &&
2029 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2030 }
2031
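/*
 * Public entry point for adding rules. FWD_NEXT_PRIO/FWD_NEXT_NS are
 * software-only actions: they are resolved here into a regular FWD_DEST
 * action with an extra destination pointing at the next flow table in
 * the chain, and the resulting rule is linked into that table's
 * fwd_rules list so it can be updated when tables in the chain change.
 */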
2032 struct mlx5_flow_handle *
2033 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2034 const struct mlx5_flow_spec *spec,
2035 struct mlx5_flow_act *flow_act,
2036 struct mlx5_flow_destination *dest,
2037 int num_dest)
2038 {
2039 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2040 static const struct mlx5_flow_spec zero_spec = {};
2041 struct mlx5_flow_destination *gen_dest = NULL;
2042 struct mlx5_flow_table *next_ft = NULL;
2043 struct mlx5_flow_handle *handle = NULL;
2044 u32 sw_action = flow_act->action;
2045 int i;
2046
2047 if (!spec)
2048 spec = &zero_spec;
2049
2050 if (!is_fwd_next_action(sw_action))
2051 return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2052
2053 if (!fwd_next_prio_supported(ft))
2054 return ERR_PTR(-EOPNOTSUPP);
2055
2056 mutex_lock(&root->chain_lock);
2057 next_ft = find_next_fwd_ft(ft, flow_act);
2058 if (!next_ft) {
2059 handle = ERR_PTR(-EOPNOTSUPP);
2060 goto unlock;
2061 }
2062
2063 gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2064 GFP_KERNEL);
2065 if (!gen_dest) {
2066 handle = ERR_PTR(-ENOMEM);
2067 goto unlock;
2068 }
2069 for (i = 0; i < num_dest; i++)
2070 gen_dest[i] = dest[i];
2071 gen_dest[i].type =
2072 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2073 gen_dest[i].ft = next_ft;
2074 dest = gen_dest;
2075 num_dest++;
2076 flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2077 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2078 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2079 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2080 if (IS_ERR(handle))
2081 goto unlock;
2082
2083 if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2084 mutex_lock(&next_ft->lock);
2085 list_add(&handle->rule[num_dest - 1]->next_ft,
2086 &next_ft->fwd_rules);
2087 mutex_unlock(&next_ft->lock);
2088 handle->rule[num_dest - 1]->sw_action = sw_action;
2089 handle->rule[num_dest - 1]->ft = ft;
2090 }
2091 unlock:
2092 mutex_unlock(&root->chain_lock);
2093 kfree(gen_dest);
2094 return handle;
2095 }
2096 EXPORT_SYMBOL(mlx5_add_flow_rules);
2097
2098 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2099 {
2100 struct fs_fte *fte;
2101 int i;
2102
2103 	/* In order to consolidate the HW changes we lock the FTE for other
2104 	 * changes, and increase its refcount, so that the "del" functions of
2105 	 * the FTE are not performed automatically; they are handled here.
2106 	 * The removal of the rules is done under the locked FTE.
2107 	 * After removing all the handle's rules, if there are remaining
2108 	 * rules, it means we just need to modify the FTE in FW, and
2109 	 * unlock/decrease the refcount we increased before.
2110 	 * Otherwise, it means the FTE should be deleted. First delete the
2111 	 * FTE in FW. Then, unlock the FTE, and proceed with tree_put_node of
2112 	 * the FTE, which will handle the last decrease of the refcount, as
2113 	 * well as the required handling of its parent.
2114 	 */
2115 fs_get_obj(fte, handle->rule[0]->node.parent);
2116 down_write_ref_node(&fte->node, false);
2117 for (i = handle->num_rules - 1; i >= 0; i--)
2118 tree_remove_node(&handle->rule[i]->node, true);
2119 if (list_empty(&fte->node.children)) {
2120 del_hw_fte(&fte->node);
2121 /* Avoid double call to del_hw_fte */
2122 fte->node.del_hw_func = NULL;
2123 up_write_ref_node(&fte->node, false);
2124 tree_put_node(&fte->node, false);
2125 } else if (fte->dests_size) {
2126 if (fte->modify_mask)
2127 modify_fte(fte);
2128 up_write_ref_node(&fte->node, false);
2129 } else {
2130 up_write_ref_node(&fte->node, false);
2131 }
2132 kfree(handle);
2133 }
2134 EXPORT_SYMBOL(mlx5_del_flow_rules);
2135
2136 /* Assuming prio->node.children (flow tables) is sorted by level */
2137 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2138 {
2139 struct fs_node *prio_parent, *child;
2140 struct fs_prio *prio;
2141
2142 fs_get_obj(prio, ft->node.parent);
2143
2144 if (!list_is_last(&ft->node.list, &prio->node.children))
2145 return list_next_entry(ft, node.list);
2146
2147 prio_parent = find_prio_chains_parent(&prio->node, &child);
2148
2149 if (prio_parent && list_is_first(&child->list, &prio_parent->children))
2150 return find_closest_ft(&prio->node, false, false);
2151
2152 return find_next_chained_ft(&prio->node);
2153 }
2154
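/*
 * If the table being destroyed is the current root of its namespace,
 * promote the next flow table (if any) to root and tell firmware about
 * it, either once with QPN 0 or once per configured underlay QPN.
 * Failures are only logged; the software root is left unchanged in that
 * case.
 */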
2155 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2156 {
2157 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2158 struct mlx5_ft_underlay_qp *uqp;
2159 struct mlx5_flow_table *new_root_ft = NULL;
2160 int err = 0;
2161 u32 qpn;
2162
2163 if (root->root_ft != ft)
2164 return 0;
2165
2166 new_root_ft = find_next_ft(ft);
2167 if (!new_root_ft) {
2168 root->root_ft = NULL;
2169 return 0;
2170 }
2171
2172 if (list_empty(&root->underlay_qpns)) {
2173 		/* Don't set any QPN (zero) in case the QPN list is empty */
2174 qpn = 0;
2175 err = root->cmds->update_root_ft(root, new_root_ft,
2176 qpn, false);
2177 } else {
2178 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2179 qpn = uqp->qpn;
2180 err = root->cmds->update_root_ft(root,
2181 new_root_ft, qpn,
2182 false);
2183 if (err)
2184 break;
2185 }
2186 }
2187
2188 if (err)
2189 mlx5_core_warn(root->dev,
2190 "Update root flow table of id(%u) qpn(%d) failed\n",
2191 ft->id, qpn);
2192 else
2193 root->root_ft = new_root_ft;
2194
2195 return 0;
2196 }
2197
2198 /* Connect flow table from previous priority to
2199 * the next flow table.
2200 */
2201 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2202 {
2203 struct mlx5_core_dev *dev = get_dev(&ft->node);
2204 struct mlx5_flow_table *next_ft;
2205 struct fs_prio *prio;
2206 int err = 0;
2207
2208 err = update_root_ft_destroy(ft);
2209 if (err)
2210 return err;
2211
2212 fs_get_obj(prio, ft->node.parent);
2213 if (!(list_first_entry(&prio->node.children,
2214 struct mlx5_flow_table,
2215 node.list) == ft))
2216 return 0;
2217
2218 next_ft = find_next_ft(ft);
2219 err = connect_fwd_rules(dev, next_ft, ft);
2220 if (err)
2221 return err;
2222
2223 err = connect_prev_fts(dev, next_ft, prio);
2224 if (err)
2225 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2226 ft->id);
2227 return err;
2228 }
2229
2230 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2231 {
2232 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2233 int err = 0;
2234
2235 mutex_lock(&root->chain_lock);
2236 if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2237 err = disconnect_flow_table(ft);
2238 if (err) {
2239 mutex_unlock(&root->chain_lock);
2240 return err;
2241 }
2242 if (tree_remove_node(&ft->node, false))
2243 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2244 ft->id);
2245 mutex_unlock(&root->chain_lock);
2246
2247 return err;
2248 }
2249 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2250
2251 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2252 {
2253 if (tree_remove_node(&fg->node, false))
2254 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2255 fg->id);
2256 }
2257 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2258
2259 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2260 int n)
2261 {
2262 struct mlx5_flow_steering *steering = dev->priv.steering;
2263
2264 if (!steering || !steering->fdb_sub_ns)
2265 return NULL;
2266
2267 return steering->fdb_sub_ns[n];
2268 }
2269 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2270
2271 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2272 enum mlx5_flow_namespace_type type)
2273 {
2274 struct mlx5_flow_steering *steering = dev->priv.steering;
2275 struct mlx5_flow_root_namespace *root_ns;
2276 int prio = 0;
2277 struct fs_prio *fs_prio;
2278 struct mlx5_flow_namespace *ns;
2279
2280 if (!steering)
2281 return NULL;
2282
2283 switch (type) {
2284 case MLX5_FLOW_NAMESPACE_FDB:
2285 if (steering->fdb_root_ns)
2286 return &steering->fdb_root_ns->ns;
2287 return NULL;
2288 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2289 if (steering->sniffer_rx_root_ns)
2290 return &steering->sniffer_rx_root_ns->ns;
2291 return NULL;
2292 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2293 if (steering->sniffer_tx_root_ns)
2294 return &steering->sniffer_tx_root_ns->ns;
2295 return NULL;
2296 default:
2297 break;
2298 }
2299
2300 if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2301 type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2302 root_ns = steering->egress_root_ns;
2303 prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2304 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2305 root_ns = steering->rdma_rx_root_ns;
2306 prio = RDMA_RX_BYPASS_PRIO;
2307 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2308 root_ns = steering->rdma_rx_root_ns;
2309 prio = RDMA_RX_KERNEL_PRIO;
2310 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2311 root_ns = steering->rdma_tx_root_ns;
2312 } else { /* Must be NIC RX */
2313 root_ns = steering->root_ns;
2314 prio = type;
2315 }
2316
2317 if (!root_ns)
2318 return NULL;
2319
2320 fs_prio = find_prio(&root_ns->ns, prio);
2321 if (!fs_prio)
2322 return NULL;
2323
2324 ns = list_first_entry(&fs_prio->node.children,
2325 typeof(*ns),
2326 node.list);
2327
2328 return ns;
2329 }
2330 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2331
2332 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2333 enum mlx5_flow_namespace_type type,
2334 int vport)
2335 {
2336 struct mlx5_flow_steering *steering = dev->priv.steering;
2337
2338 if (!steering)
2339 return NULL;
2340
2341 switch (type) {
2342 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2343 if (vport >= steering->esw_egress_acl_vports)
2344 return NULL;
2345 if (steering->esw_egress_root_ns &&
2346 steering->esw_egress_root_ns[vport])
2347 return &steering->esw_egress_root_ns[vport]->ns;
2348 else
2349 return NULL;
2350 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2351 if (vport >= steering->esw_ingress_acl_vports)
2352 return NULL;
2353 if (steering->esw_ingress_root_ns &&
2354 steering->esw_ingress_root_ns[vport])
2355 return &steering->esw_ingress_root_ns[vport]->ns;
2356 else
2357 return NULL;
2358 default:
2359 return NULL;
2360 }
2361 }
2362
2363 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2364 unsigned int prio,
2365 int num_levels,
2366 enum fs_node_type type)
2367 {
2368 struct fs_prio *fs_prio;
2369
2370 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2371 if (!fs_prio)
2372 return ERR_PTR(-ENOMEM);
2373
2374 fs_prio->node.type = type;
2375 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2376 tree_add_node(&fs_prio->node, &ns->node);
2377 fs_prio->num_levels = num_levels;
2378 fs_prio->prio = prio;
2379 list_add_tail(&fs_prio->node.list, &ns->node.children);
2380
2381 return fs_prio;
2382 }
2383
2384 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2385 unsigned int prio,
2386 int num_levels)
2387 {
2388 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2389 }
2390
2391 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2392 unsigned int prio, int num_levels)
2393 {
2394 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2395 }
2396
2397 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2398 *ns)
2399 {
2400 ns->node.type = FS_TYPE_NAMESPACE;
2401
2402 return ns;
2403 }
2404
2405 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2406 int def_miss_act)
2407 {
2408 struct mlx5_flow_namespace *ns;
2409
2410 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2411 if (!ns)
2412 return ERR_PTR(-ENOMEM);
2413
2414 fs_init_namespace(ns);
2415 ns->def_miss_action = def_miss_act;
2416 tree_init_node(&ns->node, NULL, del_sw_ns);
2417 tree_add_node(&ns->node, &prio->node);
2418 list_add_tail(&ns->node.list, &prio->node.children);
2419
2420 return ns;
2421 }
2422
2423 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2424 struct init_tree_node *prio_metadata)
2425 {
2426 struct fs_prio *fs_prio;
2427 int i;
2428
2429 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2430 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2431 if (IS_ERR(fs_prio))
2432 return PTR_ERR(fs_prio);
2433 }
2434 return 0;
2435 }
2436
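/*
 * GET_FLOW_TABLE_CAP() reads a single capability bit out of the cached
 * flow table HCA capabilities, given the bit offset of that field.
 * has_required_caps() uses it to verify that every capability a priority
 * declares is supported before that priority is created.
 */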
2437 #define FLOW_TABLE_BIT_SZ 1
2438 #define GET_FLOW_TABLE_CAP(dev, offset) \
2439 ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
2440 offset / 32)) >> \
2441 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2442 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2443 {
2444 int i;
2445
2446 for (i = 0; i < caps->arr_sz; i++) {
2447 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2448 return false;
2449 }
2450 return true;
2451 }
2452
2453 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2454 struct init_tree_node *init_node,
2455 struct fs_node *fs_parent_node,
2456 struct init_tree_node *init_parent_node,
2457 int prio)
2458 {
2459 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2460 flow_table_properties_nic_receive.
2461 max_ft_level);
2462 struct mlx5_flow_namespace *fs_ns;
2463 struct fs_prio *fs_prio;
2464 struct fs_node *base;
2465 int i;
2466 int err;
2467
2468 if (init_node->type == FS_TYPE_PRIO) {
2469 if ((init_node->min_ft_level > max_ft_level) ||
2470 !has_required_caps(steering->dev, &init_node->caps))
2471 return 0;
2472
2473 fs_get_obj(fs_ns, fs_parent_node);
2474 if (init_node->num_leaf_prios)
2475 return create_leaf_prios(fs_ns, prio, init_node);
2476 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2477 if (IS_ERR(fs_prio))
2478 return PTR_ERR(fs_prio);
2479 base = &fs_prio->node;
2480 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2481 fs_get_obj(fs_prio, fs_parent_node);
2482 fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2483 if (IS_ERR(fs_ns))
2484 return PTR_ERR(fs_ns);
2485 base = &fs_ns->node;
2486 } else {
2487 return -EINVAL;
2488 }
2489 prio = 0;
2490 for (i = 0; i < init_node->ar_size; i++) {
2491 err = init_root_tree_recursive(steering, &init_node->children[i],
2492 base, init_node, prio);
2493 if (err)
2494 return err;
2495 if (init_node->children[i].type == FS_TYPE_PRIO &&
2496 init_node->children[i].num_leaf_prios) {
2497 prio += init_node->children[i].num_leaf_prios;
2498 }
2499 }
2500
2501 return 0;
2502 }
2503
2504 static int init_root_tree(struct mlx5_flow_steering *steering,
2505 struct init_tree_node *init_node,
2506 struct fs_node *fs_parent_node)
2507 {
2508 int err;
2509 int i;
2510
2511 for (i = 0; i < init_node->ar_size; i++) {
2512 err = init_root_tree_recursive(steering, &init_node->children[i],
2513 fs_parent_node,
2514 init_node, i);
2515 if (err)
2516 return err;
2517 }
2518 return 0;
2519 }
2520
2521 static void del_sw_root_ns(struct fs_node *node)
2522 {
2523 struct mlx5_flow_root_namespace *root_ns;
2524 struct mlx5_flow_namespace *ns;
2525
2526 fs_get_obj(ns, node);
2527 root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2528 mutex_destroy(&root_ns->chain_lock);
2529 kfree(node);
2530 }
2531
2532 static struct mlx5_flow_root_namespace
2533 *create_root_ns(struct mlx5_flow_steering *steering,
2534 enum fs_flow_table_type table_type)
2535 {
2536 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2537 struct mlx5_flow_root_namespace *root_ns;
2538 struct mlx5_flow_namespace *ns;
2539
2540 if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2541 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2542 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2543
2544 /* Create the root namespace */
2545 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2546 if (!root_ns)
2547 return NULL;
2548
2549 root_ns->dev = steering->dev;
2550 root_ns->table_type = table_type;
2551 root_ns->cmds = cmds;
2552
2553 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2554
2555 ns = &root_ns->ns;
2556 fs_init_namespace(ns);
2557 mutex_init(&root_ns->chain_lock);
2558 tree_init_node(&ns->node, NULL, del_sw_root_ns);
2559 tree_add_node(&ns->node, NULL);
2560
2561 return root_ns;
2562 }
2563
2564 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2565
2566 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2567 {
2568 struct fs_prio *prio;
2569
2570 fs_for_each_prio(prio, ns) {
2571 /* This updates prio start_level and num_levels */
2572 set_prio_attrs_in_prio(prio, acc_level);
2573 acc_level += prio->num_levels;
2574 }
2575 return acc_level;
2576 }
2577
2578 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2579 {
2580 struct mlx5_flow_namespace *ns;
2581 int acc_level_ns = acc_level;
2582
2583 prio->start_level = acc_level;
2584 fs_for_each_ns(ns, prio) {
2585 /* This updates start_level and num_levels of ns's priority descendants */
2586 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2587
2588 		/* If this is a prio with chains, we can jump from one chain
2589 		 * (namespace) to another, so we accumulate the levels
2590 		 */
2591 if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2592 acc_level = acc_level_ns;
2593 }
2594
2595 if (!prio->num_levels)
2596 prio->num_levels = acc_level_ns - prio->start_level;
2597 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2598 }
2599
2600 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2601 {
2602 struct mlx5_flow_namespace *ns = &root_ns->ns;
2603 struct fs_prio *prio;
2604 int start_level = 0;
2605
2606 fs_for_each_prio(prio, ns) {
2607 set_prio_attrs_in_prio(prio, start_level);
2608 start_level += prio->num_levels;
2609 }
2610 }
2611
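/*
 * The anchor table is an empty, single-entry table created in the last
 * (anchor) priority of the NIC RX namespace, so that tables in earlier
 * priorities always have a terminal table to chain to.
 */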
2612 #define ANCHOR_PRIO 0
2613 #define ANCHOR_SIZE 1
2614 #define ANCHOR_LEVEL 0
2615 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2616 {
2617 struct mlx5_flow_namespace *ns = NULL;
2618 struct mlx5_flow_table_attr ft_attr = {};
2619 struct mlx5_flow_table *ft;
2620
2621 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2622 if (WARN_ON(!ns))
2623 return -EINVAL;
2624
2625 ft_attr.max_fte = ANCHOR_SIZE;
2626 ft_attr.level = ANCHOR_LEVEL;
2627 ft_attr.prio = ANCHOR_PRIO;
2628
2629 ft = mlx5_create_flow_table(ns, &ft_attr);
2630 if (IS_ERR(ft)) {
2631 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2632 return PTR_ERR(ft);
2633 }
2634 return 0;
2635 }
2636
2637 static int init_root_ns(struct mlx5_flow_steering *steering)
2638 {
2639 int err;
2640
2641 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2642 if (!steering->root_ns)
2643 return -ENOMEM;
2644
2645 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2646 if (err)
2647 goto out_err;
2648
2649 set_prio_attrs(steering->root_ns);
2650 err = create_anchor_flow_table(steering);
2651 if (err)
2652 goto out_err;
2653
2654 return 0;
2655
2656 out_err:
2657 cleanup_root_ns(steering->root_ns);
2658 steering->root_ns = NULL;
2659 return err;
2660 }
2661
2662 static void clean_tree(struct fs_node *node)
2663 {
2664 if (node) {
2665 struct fs_node *iter;
2666 struct fs_node *temp;
2667
2668 tree_get_node(node);
2669 list_for_each_entry_safe(iter, temp, &node->children, list)
2670 clean_tree(iter);
2671 tree_put_node(node, false);
2672 tree_remove_node(node, false);
2673 }
2674 }
2675
2676 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2677 {
2678 if (!root_ns)
2679 return;
2680
2681 clean_tree(&root_ns->ns.node);
2682 }
2683
2684 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2685 {
2686 struct mlx5_flow_steering *steering = dev->priv.steering;
2687
2688 cleanup_root_ns(steering->root_ns);
2689 cleanup_root_ns(steering->fdb_root_ns);
2690 steering->fdb_root_ns = NULL;
2691 kfree(steering->fdb_sub_ns);
2692 steering->fdb_sub_ns = NULL;
2693 cleanup_root_ns(steering->sniffer_rx_root_ns);
2694 cleanup_root_ns(steering->sniffer_tx_root_ns);
2695 cleanup_root_ns(steering->rdma_rx_root_ns);
2696 cleanup_root_ns(steering->rdma_tx_root_ns);
2697 cleanup_root_ns(steering->egress_root_ns);
2698 mlx5_cleanup_fc_stats(dev);
2699 kmem_cache_destroy(steering->ftes_cache);
2700 kmem_cache_destroy(steering->fgs_cache);
2701 mlx5_ft_pool_destroy(dev);
2702 kfree(steering);
2703 }
2704
2705 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2706 {
2707 struct fs_prio *prio;
2708
2709 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2710 if (!steering->sniffer_tx_root_ns)
2711 return -ENOMEM;
2712
2713 /* Create single prio */
2714 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2715 return PTR_ERR_OR_ZERO(prio);
2716 }
2717
2718 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2719 {
2720 struct fs_prio *prio;
2721
2722 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2723 if (!steering->sniffer_rx_root_ns)
2724 return -ENOMEM;
2725
2726 /* Create single prio */
2727 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2728 return PTR_ERR_OR_ZERO(prio);
2729 }
2730
2731 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2732 {
2733 int err;
2734
2735 steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2736 if (!steering->rdma_rx_root_ns)
2737 return -ENOMEM;
2738
2739 err = init_root_tree(steering, &rdma_rx_root_fs,
2740 &steering->rdma_rx_root_ns->ns.node);
2741 if (err)
2742 goto out_err;
2743
2744 set_prio_attrs(steering->rdma_rx_root_ns);
2745
2746 return 0;
2747
2748 out_err:
2749 cleanup_root_ns(steering->rdma_rx_root_ns);
2750 steering->rdma_rx_root_ns = NULL;
2751 return err;
2752 }
2753
2754 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2755 {
2756 int err;
2757
2758 steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2759 if (!steering->rdma_tx_root_ns)
2760 return -ENOMEM;
2761
2762 err = init_root_tree(steering, &rdma_tx_root_fs,
2763 &steering->rdma_tx_root_ns->ns.node);
2764 if (err)
2765 goto out_err;
2766
2767 set_prio_attrs(steering->rdma_tx_root_ns);
2768
2769 return 0;
2770
2771 out_err:
2772 cleanup_root_ns(steering->rdma_tx_root_ns);
2773 steering->rdma_tx_root_ns = NULL;
2774 return err;
2775 }
2776
2777 /* FT and tc chains are stored in the same array so we can re-use
2778  * mlx5_get_fdb_sub_ns() and the tc API for FT chains.
2779  * When creating a new ns for each chain, store it in the first available slot.
2780  * Assume tc chains are created and stored first and only then the FT chain.
2781  */
2782 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2783 struct mlx5_flow_namespace *ns)
2784 {
2785 int chain = 0;
2786
2787 while (steering->fdb_sub_ns[chain])
2788 ++chain;
2789
2790 steering->fdb_sub_ns[chain] = ns;
2791 }
2792
2793 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2794 struct fs_prio *maj_prio)
2795 {
2796 struct mlx5_flow_namespace *ns;
2797 struct fs_prio *min_prio;
2798 int prio;
2799
2800 ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2801 if (IS_ERR(ns))
2802 return PTR_ERR(ns);
2803
2804 for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2805 min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2806 if (IS_ERR(min_prio))
2807 return PTR_ERR(min_prio);
2808 }
2809
2810 store_fdb_sub_ns_prio_chain(steering, ns);
2811
2812 return 0;
2813 }
2814
2815 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2816 int fs_prio,
2817 int chains)
2818 {
2819 struct fs_prio *maj_prio;
2820 int levels;
2821 int chain;
2822 int err;
2823
2824 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2825 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2826 fs_prio,
2827 levels);
2828 if (IS_ERR(maj_prio))
2829 return PTR_ERR(maj_prio);
2830
2831 for (chain = 0; chain < chains; chain++) {
2832 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2833 if (err)
2834 return err;
2835 }
2836
2837 return 0;
2838 }
2839
2840 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2841 {
2842 int err;
2843
2844 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2845 sizeof(*steering->fdb_sub_ns),
2846 GFP_KERNEL);
2847 if (!steering->fdb_sub_ns)
2848 return -ENOMEM;
2849
2850 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2851 if (err)
2852 return err;
2853
2854 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2855 if (err)
2856 return err;
2857
2858 return 0;
2859 }
2860
2861 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2862 {
2863 struct fs_prio *maj_prio;
2864 int err;
2865
2866 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2867 if (!steering->fdb_root_ns)
2868 return -ENOMEM;
2869
2870 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2871 1);
2872 if (IS_ERR(maj_prio)) {
2873 err = PTR_ERR(maj_prio);
2874 goto out_err;
2875 }
2876 err = create_fdb_fast_path(steering);
2877 if (err)
2878 goto out_err;
2879
2880 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2881 if (IS_ERR(maj_prio)) {
2882 err = PTR_ERR(maj_prio);
2883 goto out_err;
2884 }
2885
2886 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2887 if (IS_ERR(maj_prio)) {
2888 err = PTR_ERR(maj_prio);
2889 goto out_err;
2890 }
2891
2892 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2893 if (IS_ERR(maj_prio)) {
2894 err = PTR_ERR(maj_prio);
2895 goto out_err;
2896 }
2897
2898 	/* We put this priority last, knowing that nothing will get here
2899 	 * unless explicitly forwarded to. This is possible because the
2900 	 * slow path tables have catch-all rules and nothing gets past
2901 	 * those tables.
2902 	 */
2903 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2904 if (IS_ERR(maj_prio)) {
2905 err = PTR_ERR(maj_prio);
2906 goto out_err;
2907 }
2908
2909 set_prio_attrs(steering->fdb_root_ns);
2910 return 0;
2911
2912 out_err:
2913 cleanup_root_ns(steering->fdb_root_ns);
2914 kfree(steering->fdb_sub_ns);
2915 steering->fdb_sub_ns = NULL;
2916 steering->fdb_root_ns = NULL;
2917 return err;
2918 }
2919
2920 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2921 {
2922 struct fs_prio *prio;
2923
2924 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2925 if (!steering->esw_egress_root_ns[vport])
2926 return -ENOMEM;
2927
2928 	/* Create single prio */
2929 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2930 return PTR_ERR_OR_ZERO(prio);
2931 }
2932
2933 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2934 {
2935 struct fs_prio *prio;
2936
2937 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2938 if (!steering->esw_ingress_root_ns[vport])
2939 return -ENOMEM;
2940
2941 	/* Create single prio */
2942 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2943 return PTR_ERR_OR_ZERO(prio);
2944 }
2945
2946 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2947 {
2948 struct mlx5_flow_steering *steering = dev->priv.steering;
2949 int err;
2950 int i;
2951
2952 steering->esw_egress_root_ns =
2953 kcalloc(total_vports,
2954 sizeof(*steering->esw_egress_root_ns),
2955 GFP_KERNEL);
2956 if (!steering->esw_egress_root_ns)
2957 return -ENOMEM;
2958
2959 for (i = 0; i < total_vports; i++) {
2960 err = init_egress_acl_root_ns(steering, i);
2961 if (err)
2962 goto cleanup_root_ns;
2963 }
2964 steering->esw_egress_acl_vports = total_vports;
2965 return 0;
2966
2967 cleanup_root_ns:
2968 for (i--; i >= 0; i--)
2969 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2970 kfree(steering->esw_egress_root_ns);
2971 steering->esw_egress_root_ns = NULL;
2972 return err;
2973 }
2974
2975 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
2976 {
2977 struct mlx5_flow_steering *steering = dev->priv.steering;
2978 int i;
2979
2980 if (!steering->esw_egress_root_ns)
2981 return;
2982
2983 for (i = 0; i < steering->esw_egress_acl_vports; i++)
2984 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2985
2986 kfree(steering->esw_egress_root_ns);
2987 steering->esw_egress_root_ns = NULL;
2988 }
2989
2990 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2991 {
2992 struct mlx5_flow_steering *steering = dev->priv.steering;
2993 int err;
2994 int i;
2995
2996 steering->esw_ingress_root_ns =
2997 kcalloc(total_vports,
2998 sizeof(*steering->esw_ingress_root_ns),
2999 GFP_KERNEL);
3000 if (!steering->esw_ingress_root_ns)
3001 return -ENOMEM;
3002
3003 for (i = 0; i < total_vports; i++) {
3004 err = init_ingress_acl_root_ns(steering, i);
3005 if (err)
3006 goto cleanup_root_ns;
3007 }
3008 steering->esw_ingress_acl_vports = total_vports;
3009 return 0;
3010
3011 cleanup_root_ns:
3012 for (i--; i >= 0; i--)
3013 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3014 kfree(steering->esw_ingress_root_ns);
3015 steering->esw_ingress_root_ns = NULL;
3016 return err;
3017 }
3018
3019 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3020 {
3021 struct mlx5_flow_steering *steering = dev->priv.steering;
3022 int i;
3023
3024 if (!steering->esw_ingress_root_ns)
3025 return;
3026
3027 for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3028 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3029
3030 kfree(steering->esw_ingress_root_ns);
3031 steering->esw_ingress_root_ns = NULL;
3032 }
3033
3034 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3035 {
3036 int err;
3037
3038 steering->egress_root_ns = create_root_ns(steering,
3039 FS_FT_NIC_TX);
3040 if (!steering->egress_root_ns)
3041 return -ENOMEM;
3042
3043 err = init_root_tree(steering, &egress_root_fs,
3044 &steering->egress_root_ns->ns.node);
3045 if (err)
3046 goto cleanup;
3047 set_prio_attrs(steering->egress_root_ns);
3048 return 0;
3049 cleanup:
3050 cleanup_root_ns(steering->egress_root_ns);
3051 steering->egress_root_ns = NULL;
3052 return err;
3053 }
3054
3055 int mlx5_init_fs(struct mlx5_core_dev *dev)
3056 {
3057 struct mlx5_flow_steering *steering;
3058 int err = 0;
3059
3060 err = mlx5_init_fc_stats(dev);
3061 if (err)
3062 return err;
3063
3064 err = mlx5_ft_pool_init(dev);
3065 if (err)
3066 return err;
3067
3068 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3069 if (!steering) {
3070 err = -ENOMEM;
3071 goto err;
3072 }
3073
3074 steering->dev = dev;
3075 dev->priv.steering = steering;
3076
3077 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
3078 sizeof(struct mlx5_flow_group), 0,
3079 0, NULL);
3080 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
3081 0, NULL);
3082 if (!steering->ftes_cache || !steering->fgs_cache) {
3083 err = -ENOMEM;
3084 goto err;
3085 }
3086
3087 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3088 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3089 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3090 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3091 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3092 err = init_root_ns(steering);
3093 if (err)
3094 goto err;
3095 }
3096
3097 if (MLX5_ESWITCH_MANAGER(dev)) {
3098 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3099 err = init_fdb_root_ns(steering);
3100 if (err)
3101 goto err;
3102 }
3103 }
3104
3105 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3106 err = init_sniffer_rx_root_ns(steering);
3107 if (err)
3108 goto err;
3109 }
3110
3111 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3112 err = init_sniffer_tx_root_ns(steering);
3113 if (err)
3114 goto err;
3115 }
3116
3117 if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3118 MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3119 err = init_rdma_rx_root_ns(steering);
3120 if (err)
3121 goto err;
3122 }
3123
3124 if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3125 err = init_rdma_tx_root_ns(steering);
3126 if (err)
3127 goto err;
3128 }
3129
3130 if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3131 MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3132 err = init_egress_root_ns(steering);
3133 if (err)
3134 goto err;
3135 }
3136
3137 return 0;
3138 err:
3139 mlx5_cleanup_fs(dev);
3140 return err;
3141 }
3142
3143 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3144 {
3145 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3146 struct mlx5_ft_underlay_qp *new_uqp;
3147 int err = 0;
3148
3149 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3150 if (!new_uqp)
3151 return -ENOMEM;
3152
3153 mutex_lock(&root->chain_lock);
3154
3155 if (!root->root_ft) {
3156 err = -EINVAL;
3157 goto update_ft_fail;
3158 }
3159
3160 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3161 false);
3162 if (err) {
3163 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3164 underlay_qpn, err);
3165 goto update_ft_fail;
3166 }
3167
3168 new_uqp->qpn = underlay_qpn;
3169 list_add_tail(&new_uqp->list, &root->underlay_qpns);
3170
3171 mutex_unlock(&root->chain_lock);
3172
3173 return 0;
3174
3175 update_ft_fail:
3176 mutex_unlock(&root->chain_lock);
3177 kfree(new_uqp);
3178 return err;
3179 }
3180 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3181
3182 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3183 {
3184 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3185 struct mlx5_ft_underlay_qp *uqp;
3186 bool found = false;
3187 int err = 0;
3188
3189 mutex_lock(&root->chain_lock);
3190 list_for_each_entry(uqp, &root->underlay_qpns, list) {
3191 if (uqp->qpn == underlay_qpn) {
3192 found = true;
3193 break;
3194 }
3195 }
3196
3197 if (!found) {
3198 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3199 underlay_qpn);
3200 err = -EINVAL;
3201 goto out;
3202 }
3203
3204 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3205 true);
3206 if (err)
3207 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3208 underlay_qpn, err);
3209
3210 list_del(&uqp->list);
3211 mutex_unlock(&root->chain_lock);
3212 kfree(uqp);
3213
3214 return 0;
3215
3216 out:
3217 mutex_unlock(&root->chain_lock);
3218 return err;
3219 }
3220 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3221
3222 static struct mlx5_flow_root_namespace
3223 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3224 {
3225 struct mlx5_flow_namespace *ns;
3226
3227 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3228 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3229 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3230 else
3231 ns = mlx5_get_flow_namespace(dev, ns_type);
3232 if (!ns)
3233 return NULL;
3234
3235 return find_root(&ns->node);
3236 }
3237
3238 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3239 u8 ns_type, u8 num_actions,
3240 void *modify_actions)
3241 {
3242 struct mlx5_flow_root_namespace *root;
3243 struct mlx5_modify_hdr *modify_hdr;
3244 int err;
3245
3246 root = get_root_namespace(dev, ns_type);
3247 if (!root)
3248 return ERR_PTR(-EOPNOTSUPP);
3249
3250 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3251 if (!modify_hdr)
3252 return ERR_PTR(-ENOMEM);
3253
3254 modify_hdr->ns_type = ns_type;
3255 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3256 modify_actions, modify_hdr);
3257 if (err) {
3258 kfree(modify_hdr);
3259 return ERR_PTR(err);
3260 }
3261
3262 return modify_hdr;
3263 }
3264 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3265
3266 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3267 struct mlx5_modify_hdr *modify_hdr)
3268 {
3269 struct mlx5_flow_root_namespace *root;
3270
3271 root = get_root_namespace(dev, modify_hdr->ns_type);
3272 if (WARN_ON(!root))
3273 return;
3274 root->cmds->modify_header_dealloc(root, modify_hdr);
3275 kfree(modify_hdr);
3276 }
3277 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3278
3279 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3280 struct mlx5_pkt_reformat_params *params,
3281 enum mlx5_flow_namespace_type ns_type)
3282 {
3283 struct mlx5_pkt_reformat *pkt_reformat;
3284 struct mlx5_flow_root_namespace *root;
3285 int err;
3286
3287 root = get_root_namespace(dev, ns_type);
3288 if (!root)
3289 return ERR_PTR(-EOPNOTSUPP);
3290
3291 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3292 if (!pkt_reformat)
3293 return ERR_PTR(-ENOMEM);
3294
3295 pkt_reformat->ns_type = ns_type;
3296 pkt_reformat->reformat_type = params->type;
3297 err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3298 pkt_reformat);
3299 if (err) {
3300 kfree(pkt_reformat);
3301 return ERR_PTR(err);
3302 }
3303
3304 return pkt_reformat;
3305 }
3306 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3307
3308 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3309 struct mlx5_pkt_reformat *pkt_reformat)
3310 {
3311 struct mlx5_flow_root_namespace *root;
3312
3313 root = get_root_namespace(dev, pkt_reformat->ns_type);
3314 if (WARN_ON(!root))
3315 return;
3316 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3317 kfree(pkt_reformat);
3318 }
3319 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3320
3321 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3322 struct mlx5_flow_root_namespace *peer_ns)
3323 {
3324 if (peer_ns && ns->mode != peer_ns->mode) {
3325 mlx5_core_err(ns->dev,
3326 "Can't peer namespace of different steering mode\n");
3327 return -EINVAL;
3328 }
3329
3330 return ns->cmds->set_peer(ns, peer_ns);
3331 }
3332
3333 /* This function should be called only at the init stage of the namespace.
3334  * It is not safe to call this function while steering operations
3335  * are executed in the namespace.
3336  */
3337 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3338 enum mlx5_flow_steering_mode mode)
3339 {
3340 struct mlx5_flow_root_namespace *root;
3341 const struct mlx5_flow_cmds *cmds;
3342 int err;
3343
3344 root = find_root(&ns->node);
3345 if (&root->ns != ns)
3346 /* Can't set cmds to non root namespace */
3347 return -EINVAL;
3348
3349 if (root->table_type != FS_FT_FDB)
3350 return -EOPNOTSUPP;
3351
3352 if (root->mode == mode)
3353 return 0;
3354
3355 if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3356 cmds = mlx5_fs_cmd_get_dr_cmds();
3357 else
3358 cmds = mlx5_fs_cmd_get_fw_cmds();
3359 if (!cmds)
3360 return -EOPNOTSUPP;
3361
3362 err = cmds->create_ns(root);
3363 if (err) {
3364 mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3365 err);
3366 return err;
3367 }
3368
3369 root->cmds->destroy_ns(root);
3370 root->cmds = cmds;
3371 root->mode = mode;
3372
3373 return 0;
3374 }
3375