/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

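/* The macros below build a static tree of init_tree_node entries
 * describing the default priority/namespace layout of each steering
 * domain. Children are compound-literal arrays, so ar_size can be
 * computed at compile time from the __VA_ARGS__ list.
 */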
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX                                                \
	FS_REQUIRED_CAPS(                                                       \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .identified_miss_table_mode),                    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 6
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS  1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						    BY_PASS_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						    LAG_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						    OFFLOADS_MAX_FT))),
		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						    ETHTOOL_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						    KERNEL_NIC_TC_NUM_LEVELS),
				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						    KERNEL_NIC_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						    LEFTOVERS_NUM_LEVELS))),
		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						    ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
#ifdef CONFIG_MLX5_IPSEC
	.ar_size = 2,
#else
	.ar_size = 1,
#endif
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
#ifdef CONFIG_MLX5_IPSEC
		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
#endif
	}
};

#define RDMA_RX_BYPASS_PRIO 0
#define RDMA_RX_KERNEL_PRIO 1
static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

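/* Lockdep subclasses for the per-node rwsems: nodes are locked
 * top-down (e.g. flow table, then flow group, then FTE), so nested
 * acquisitions pass these classes to down_read_nested() and
 * down_write_nested().
 */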
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires
 * locking the FTE for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

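/* fs_node objects form a refcounted tree. A node starts with one
 * reference; tree_add_node() takes a reference on the parent, and
 * tree_put_node() releases it again when the last reference to the
 * child is dropped.
 */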
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

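/* Drop a reference to node. On the last put, destroy the HW object,
 * unlink the node from its parent under the parent's lock, free the
 * SW object, and then release the reference held on the parent.
 */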
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

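/* Removing one destination updates fte->dests_size and accumulates the
 * FTE fields that must be refreshed in hardware into fte->modify_mask;
 * modify_fte() later pushes the accumulated mask to the device.
 */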
static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
	    --fte->dests_size) {
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

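/* Allocate an index for the FTE inside its group and hash it so that
 * identical match values can be found quickly.
 */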
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, search for the first flow table in the sub-tree
 * rooted at root, starting from start (closest from the right);
 * otherwise search for the last flow table before start (closest from
 * the left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false, return the first flow table in the next priority
 * after prio in the tree; otherwise return the last flow table in the
 * previous priority before prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

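/* Return the flow table a FWD_NEXT_PRIO/FWD_NEXT_NS rule on ft should
 * forward to: the first table chained after ft's priority (or after
 * ft's namespace when forwarding to the next namespace).
 */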
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

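/* Re-point the next-table (miss) link of every flow table in prio at
 * ft.
 */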
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

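/* If ft now sits at a lower level than the current root table, make it
 * the namespace's root table in hardware, once per underlay QPN when
 * any are registered.
 */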
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Re-point FWD rules that currently forward to old_next_ft so they
 * forward to new_next_ft instead.
 */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

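/* Common table-creation path: resolve the prio, translate the caller's
 * level into the prio's level range, create the table in hardware and,
 * for managed tables, splice it into the prio's table chain.
 */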
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

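/* Minimal usage sketch (illustrative only, not part of this file):
 * create a table in the bypass namespace. Error handling is elided and
 * mlx5_get_flow_namespace() is assumed from include/linux/mlx5/fs.h.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_BYPASS);
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 1024;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 */
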
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;

	if (max_num_groups > autogroups_max_fte)
		return ERR_PTR(-EINVAL);
	if (num_reserved_entries > ft_attr->max_fte)
		return ERR_PTR(-EINVAL);

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* Divide by (max_num_groups + 1) so that spare room is left
	 * beyond the requested groups.
	 */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

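/* Create a flow group from a caller-built create_flow_group_in command.
 * Manual groups may not overlap the region reserved for autogroups.
 */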
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

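/* Build a handle with one rule per destination. Destinations the FTE
 * already points at are reused (their rule refcount is bumped); only
 * genuinely new rules set *new_rule, and *modify_mask accumulates the
 * FTE fields that must be updated in hardware.
 */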
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list. Flow tables must stay at
		 * the end of the list so forward-to-next-prio rules are
		 * handled last.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

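/* Find room for a new autogroup: walk the start_index-sorted group
 * list for the first gap that can hold group_size entries and insert
 * the new group there.
 */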
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* group_size is 0 once all required groups exist (or when the
	 * table was too small to reserve room per group); fall back to
	 * single-FTE groups.
	 */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

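/* Merge a new rule into an existing FTE: OR in the new actions, push
 * the destinations to hardware, and revert the action mask on failure.
 */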
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

1656 static int build_match_list(struct match_list *match_head,
1657 			    struct mlx5_flow_table *ft,
1658 			    const struct mlx5_flow_spec *spec,
1659 			    bool ft_locked)
1660 {
1661 	struct rhlist_head *tmp, *list;
1662 	struct mlx5_flow_group *g;
1663 	int err = 0;
1664 
1665 	rcu_read_lock();
1666 	INIT_LIST_HEAD(&match_head->list);
1667 	/* Collect all fgs which have a matching match_criteria */
1668 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1669 	/* RCU read-side is atomic; we can't execute FW commands here */
1670 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1671 		struct match_list *curr_match;
1672 
1673 		if (unlikely(!tree_get_node(&g->node)))
1674 			continue;
1675 
1676 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1677 		if (!curr_match) {
1678 			rcu_read_unlock();
1679 			free_match_list(match_head, ft_locked);
1680 			return -ENOMEM;
1681 		}
1682 		curr_match->g = g;
1683 		list_add_tail(&curr_match->list, &match_head->list);
1684 	}
1685 	rcu_read_unlock();
1686 	return err;
1687 }
1688 
1689 static u64 matched_fgs_get_version(struct list_head *match_head)
1690 {
1691 	struct match_list *iter;
1692 	u64 version = 0;
1693 
1694 	list_for_each_entry(iter, match_head, list)
1695 		version += (u64)atomic_read(&iter->g->node.version);
1696 	return version;
1697 }
1698 
1699 static struct fs_fte *
1700 lookup_fte_locked(struct mlx5_flow_group *g,
1701 		  const u32 *match_value,
1702 		  bool take_write)
1703 {
1704 	struct fs_fte *fte_tmp;
1705 
1706 	if (take_write)
1707 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1708 	else
1709 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1710 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1711 					 rhash_fte);
1712 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1713 		fte_tmp = NULL;
1714 		goto out;
1715 	}
1716 	if (!fte_tmp->node.active) {
1717 		tree_put_node(&fte_tmp->node, false);
1718 		fte_tmp = NULL;
1719 		goto out;
1720 	}
1721 
1722 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1723 out:
1724 	if (take_write)
1725 		up_write_ref_node(&g->node, false);
1726 	else
1727 		up_read_ref_node(&g->node);
1728 	return fte_tmp;
1729 }
1730 
1731 static struct mlx5_flow_handle *
1732 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1733 		       struct list_head *match_head,
1734 		       const struct mlx5_flow_spec *spec,
1735 		       struct mlx5_flow_act *flow_act,
1736 		       struct mlx5_flow_destination *dest,
1737 		       int dest_num,
1738 		       int ft_version)
1739 {
1740 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1741 	struct mlx5_flow_group *g;
1742 	struct mlx5_flow_handle *rule;
1743 	struct match_list *iter;
1744 	bool take_write = false;
1745 	struct fs_fte *fte;
1746 	u64  version = 0;
1747 	int err;
1748 
1749 	fte = alloc_fte(ft, spec, flow_act);
1750 	if (IS_ERR(fte))
1751 		return ERR_PTR(-ENOMEM);
1752 
1753 search_again_locked:
1754 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1755 		goto skip_search;
1756 	version = matched_fgs_get_version(match_head);
1757 	/* Try to find an fte with an identical match value and attempt to
1758 	 * update its action.
1759 	 */
1760 	list_for_each_entry(iter, match_head, list) {
1761 		struct fs_fte *fte_tmp;
1762 
1763 		g = iter->g;
1764 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1765 		if (!fte_tmp)
1766 			continue;
1767 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1768 		/* No error check needed here, because insert_fte() is not called */
1769 		up_write_ref_node(&fte_tmp->node, false);
1770 		tree_put_node(&fte_tmp->node, false);
1771 		kmem_cache_free(steering->ftes_cache, fte);
1772 		return rule;
1773 	}
1774 
1775 skip_search:
1776 	/* No group with matching fte found, or we skipped the search.
1777 	 * Try to add a new fte to any matching fg.
1778 	 */
1779 
1780 	/* Check the ft version, in case a new flow group
1781 	 * was added while the fgs weren't locked.
1782 	 */
1783 	if (atomic_read(&ft->node.version) != ft_version) {
1784 		rule = ERR_PTR(-EAGAIN);
1785 		goto out;
1786 	}
1787 
1788 	/* Check the fgs version. If the version has changed, it could be
1789 	 * that an FTE with the same match value was added while the fgs
1790 	 * weren't locked.
1791 	 */
1792 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1793 	    version != matched_fgs_get_version(match_head)) {
1794 		take_write = true;
1795 		goto search_again_locked;
1796 	}
1797 
1798 	list_for_each_entry(iter, match_head, list) {
1799 		g = iter->g;
1800 
1801 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1802 
1803 		if (!g->node.active) {
1804 			up_write_ref_node(&g->node, false);
1805 			continue;
1806 		}
1807 
1808 		err = insert_fte(g, fte);
1809 		if (err) {
1810 			up_write_ref_node(&g->node, false);
1811 			if (err == -ENOSPC)
1812 				continue;
1813 			kmem_cache_free(steering->ftes_cache, fte);
1814 			return ERR_PTR(err);
1815 		}
1816 
1817 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1818 		up_write_ref_node(&g->node, false);
1819 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1820 		up_write_ref_node(&fte->node, false);
1821 		if (IS_ERR(rule))
1822 			tree_put_node(&fte->node, false);
1823 		return rule;
1824 	}
1825 	rule = ERR_PTR(-ENOENT);
1826 out:
1827 	kmem_cache_free(steering->ftes_cache, fte);
1828 	return rule;
1829 }
1830 
1831 static struct mlx5_flow_handle *
1832 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1833 		     const struct mlx5_flow_spec *spec,
1834 		     struct mlx5_flow_act *flow_act,
1835 		     struct mlx5_flow_destination *dest,
1836 		     int dest_num)
1837 
1838 {
1839 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1840 	struct mlx5_flow_handle *rule;
1841 	struct match_list match_head;
1842 	struct mlx5_flow_group *g;
1843 	bool take_write = false;
1844 	struct fs_fte *fte;
1845 	int version;
1846 	int err;
1847 	int i;
1848 
1849 	if (!check_valid_spec(spec))
1850 		return ERR_PTR(-EINVAL);
1851 
1852 	for (i = 0; i < dest_num; i++) {
1853 		if (!dest_is_valid(&dest[i], flow_act, ft))
1854 			return ERR_PTR(-EINVAL);
1855 	}
1856 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1857 search_again_locked:
1858 	version = atomic_read(&ft->node.version);
1859 
1860 	/* Collect all fgs which have a matching match_criteria */
1861 	err = build_match_list(&match_head, ft, spec, take_write);
1862 	if (err) {
1863 		if (take_write)
1864 			up_write_ref_node(&ft->node, false);
1865 		else
1866 			up_read_ref_node(&ft->node);
1867 		return ERR_PTR(err);
1868 	}
1869 
1870 	if (!take_write)
1871 		up_read_ref_node(&ft->node);
1872 
1873 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1874 				      dest_num, version);
1875 	free_match_list(&match_head, take_write);
1876 	if (!IS_ERR(rule) ||
1877 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1878 		if (take_write)
1879 			up_write_ref_node(&ft->node, false);
1880 		return rule;
1881 	}
1882 
1883 	if (!take_write) {
1884 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1885 		take_write = true;
1886 	}
1887 
1888 	if (PTR_ERR(rule) == -EAGAIN ||
1889 	    version != atomic_read(&ft->node.version))
1890 		goto search_again_locked;
1891 
1892 	g = alloc_auto_flow_group(ft, spec);
1893 	if (IS_ERR(g)) {
1894 		rule = ERR_CAST(g);
1895 		up_write_ref_node(&ft->node, false);
1896 		return rule;
1897 	}
1898 
1899 	fte = alloc_fte(ft, spec, flow_act);
1900 	if (IS_ERR(fte)) {
1901 		up_write_ref_node(&ft->node, false);
1902 		err = PTR_ERR(fte);
1903 		goto err_alloc_fte;
1904 	}
1905 
1906 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1907 	up_write_ref_node(&ft->node, false);
1908 
1909 	err = create_auto_flow_group(ft, g);
1910 	if (err)
1911 		goto err_release_fg;
1912 
1913 	err = insert_fte(g, fte);
1914 	if (err)
1915 		goto err_release_fg;
1916 
1917 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1918 	up_write_ref_node(&g->node, false);
1919 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1920 	up_write_ref_node(&fte->node, false);
1921 	if (IS_ERR(rule))
1922 		tree_put_node(&fte->node, false);
1923 	tree_put_node(&g->node, false);
1924 	return rule;
1925 
1926 err_release_fg:
1927 	up_write_ref_node(&g->node, false);
1928 	kmem_cache_free(steering->ftes_cache, fte);
1929 err_alloc_fte:
1930 	tree_put_node(&g->node, false);
1931 	return ERR_PTR(err);
1932 }
1933 
1934 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1935 {
1936 	return ((ft->type == FS_FT_NIC_RX) &&
1937 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1938 }
1939 
1940 struct mlx5_flow_handle *
1941 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1942 		    const struct mlx5_flow_spec *spec,
1943 		    struct mlx5_flow_act *flow_act,
1944 		    struct mlx5_flow_destination *dest,
1945 		    int num_dest)
1946 {
1947 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1948 	static const struct mlx5_flow_spec zero_spec = {};
1949 	struct mlx5_flow_destination *gen_dest = NULL;
1950 	struct mlx5_flow_table *next_ft = NULL;
1951 	struct mlx5_flow_handle *handle = NULL;
1952 	u32 sw_action = flow_act->action;
1953 	int i;
1954 
1955 	if (!spec)
1956 		spec = &zero_spec;
1957 
1958 	if (!is_fwd_next_action(sw_action))
1959 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1960 
1961 	if (!fwd_next_prio_supported(ft))
1962 		return ERR_PTR(-EOPNOTSUPP);
1963 
1964 	mutex_lock(&root->chain_lock);
1965 	next_ft = find_next_fwd_ft(ft, flow_act);
1966 	if (!next_ft) {
1967 		handle = ERR_PTR(-EOPNOTSUPP);
1968 		goto unlock;
1969 	}
1970 
1971 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
1972 			   GFP_KERNEL);
1973 	if (!gen_dest) {
1974 		handle = ERR_PTR(-ENOMEM);
1975 		goto unlock;
1976 	}
1977 	for (i = 0; i < num_dest; i++)
1978 		gen_dest[i] = dest[i];
1979 	gen_dest[i].type =
1980 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1981 	gen_dest[i].ft = next_ft;
1982 	dest = gen_dest;
1983 	num_dest++;
1984 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
1985 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
1986 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1987 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1988 	if (IS_ERR(handle))
1989 		goto unlock;
1990 
1991 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
1992 		mutex_lock(&next_ft->lock);
1993 		list_add(&handle->rule[num_dest - 1]->next_ft,
1994 			 &next_ft->fwd_rules);
1995 		mutex_unlock(&next_ft->lock);
1996 		handle->rule[num_dest - 1]->sw_action = sw_action;
1997 		handle->rule[num_dest - 1]->ft = ft;
1998 	}
1999 unlock:
2000 	mutex_unlock(&root->chain_lock);
2001 	kfree(gen_dest);
2002 	return handle;
2003 }
2004 EXPORT_SYMBOL(mlx5_add_flow_rules);
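
/* Usage sketch (illustrative only, not used by the driver): build a spec
 * that matches TCP packets and forward them to a TIR, then tear the rule
 * down again.  The table, the TIR number and the match choice are
 * assumptions made for the example.
 */
static int __maybe_unused example_add_del_rule(struct mlx5_flow_table *ft,
					       u32 tirn)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_spec *spec;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Match on the outer IP protocol field: TCP only */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.ip_protocol, 6 /* IPPROTO_TCP */);

	/* Forward matching packets to a TIR */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... rule is active; remove it when no longer needed ... */
	mlx5_del_flow_rules(handle);
	return 0;
}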
2005 
2006 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2007 {
2008 	struct fs_fte *fte;
2009 	int i;
2010 
2011 	/* In order to consolidate the HW changes we lock the FTE against
2012 	 * other changes and increase its refcount so that the "del"
2013 	 * functions of the FTE are not invoked; we handle that work here.
2014 	 * The removal of the rules is done under the locked FTE.
2015 	 * After removing all of the handle's rules, if rules remain, we
2016 	 * just need to modify the FTE in FW and unlock/decrease the
2017 	 * refcount we increased before.
2018 	 * Otherwise, the FTE should be deleted: first delete the FTE in
2019 	 * FW, then unlock the FTE and proceed with tree_put_node() of the
2020 	 * FTE, which handles the final refcount decrease as well as the
2021 	 * required handling of its parent.
2022 	 */
2023 	fs_get_obj(fte, handle->rule[0]->node.parent);
2024 	down_write_ref_node(&fte->node, false);
2025 	for (i = handle->num_rules - 1; i >= 0; i--)
2026 		tree_remove_node(&handle->rule[i]->node, true);
2027 	if (fte->dests_size) {
2028 		if (fte->modify_mask)
2029 			modify_fte(fte);
2030 		up_write_ref_node(&fte->node, false);
2031 	} else if (list_empty(&fte->node.children)) {
2032 		del_hw_fte(&fte->node);
2033 		/* Avoid double call to del_hw_fte */
2034 		fte->node.del_hw_func = NULL;
2035 		up_write_ref_node(&fte->node, false);
2036 		tree_put_node(&fte->node, false);
2037 	}
2038 	kfree(handle);
2039 }
2040 EXPORT_SYMBOL(mlx5_del_flow_rules);
2041 
2042 /* Assuming prio->node.children (flow tables) is sorted by level */
2043 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2044 {
2045 	struct fs_prio *prio;
2046 
2047 	fs_get_obj(prio, ft->node.parent);
2048 
2049 	if (!list_is_last(&ft->node.list, &prio->node.children))
2050 		return list_next_entry(ft, node.list);
2051 	return find_next_chained_ft(prio);
2052 }
2053 
2054 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2055 {
2056 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2057 	struct mlx5_ft_underlay_qp *uqp;
2058 	struct mlx5_flow_table *new_root_ft = NULL;
2059 	int err = 0;
2060 	u32 qpn;
2061 
2062 	if (root->root_ft != ft)
2063 		return 0;
2064 
2065 	new_root_ft = find_next_ft(ft);
2066 	if (!new_root_ft) {
2067 		root->root_ft = NULL;
2068 		return 0;
2069 	}
2070 
2071 	if (list_empty(&root->underlay_qpns)) {
2072 		/* Don't set any QPN (zero) in case the QPN list is empty */
2073 		qpn = 0;
2074 		err = root->cmds->update_root_ft(root, new_root_ft,
2075 						 qpn, false);
2076 	} else {
2077 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2078 			qpn = uqp->qpn;
2079 			err = root->cmds->update_root_ft(root,
2080 							 new_root_ft, qpn,
2081 							 false);
2082 			if (err)
2083 				break;
2084 		}
2085 	}
2086 
2087 	if (err)
2088 		mlx5_core_warn(root->dev,
2089 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2090 			       ft->id, qpn);
2091 	else
2092 		root->root_ft = new_root_ft;
2093 
2094 	return 0;
2095 }
2096 
2097 /* Connect flow table from previous priority to
2098  * the next flow table.
2099  */
2100 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2101 {
2102 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2103 	struct mlx5_flow_table *next_ft;
2104 	struct fs_prio *prio;
2105 	int err = 0;
2106 
2107 	err = update_root_ft_destroy(ft);
2108 	if (err)
2109 		return err;
2110 
2111 	fs_get_obj(prio, ft->node.parent);
2112 	if (list_first_entry(&prio->node.children,
2113 			     struct mlx5_flow_table,
2114 			     node.list) != ft)
2115 		return 0;
2116 
2117 	next_ft = find_next_ft(ft);
2118 	err = connect_fwd_rules(dev, next_ft, ft);
2119 	if (err)
2120 		return err;
2121 
2122 	err = connect_prev_fts(dev, next_ft, prio);
2123 	if (err)
2124 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2125 			       ft->id);
2126 	return err;
2127 }
2128 
2129 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2130 {
2131 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2132 	int err = 0;
2133 
2134 	mutex_lock(&root->chain_lock);
2135 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2136 		err = disconnect_flow_table(ft);
2137 	if (err) {
2138 		mutex_unlock(&root->chain_lock);
2139 		return err;
2140 	}
2141 	if (tree_remove_node(&ft->node, false))
2142 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2143 			       ft->id);
2144 	mutex_unlock(&root->chain_lock);
2145 
2146 	return err;
2147 }
2148 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2149 
2150 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2151 {
2152 	if (tree_remove_node(&fg->node, false))
2153 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2154 			       fg->id);
2155 }
2156 
2157 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2158 						int n)
2159 {
2160 	struct mlx5_flow_steering *steering = dev->priv.steering;
2161 
2162 	if (!steering || !steering->fdb_sub_ns)
2163 		return NULL;
2164 
2165 	return steering->fdb_sub_ns[n];
2166 }
2167 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
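
/* Usage sketch (illustrative only): tc chains occupy the first slots of
 * fdb_sub_ns, so chain 0 of the eswitch FDB is fetched with index 0.
 * Valid indexes run up to FDB_NUM_CHAINS - 1; see the comment above
 * store_fdb_sub_ns_prio_chain() further down for the slot layout.
 */
static struct mlx5_flow_namespace *__maybe_unused
example_get_fdb_chain0_ns(struct mlx5_core_dev *dev)
{
	return mlx5_get_fdb_sub_ns(dev, 0);
}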
2168 
2169 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2170 						    enum mlx5_flow_namespace_type type)
2171 {
2172 	struct mlx5_flow_steering *steering = dev->priv.steering;
2173 	struct mlx5_flow_root_namespace *root_ns;
2174 	int prio = 0;
2175 	struct fs_prio *fs_prio;
2176 	struct mlx5_flow_namespace *ns;
2177 
2178 	if (!steering)
2179 		return NULL;
2180 
2181 	switch (type) {
2182 	case MLX5_FLOW_NAMESPACE_FDB:
2183 		if (steering->fdb_root_ns)
2184 			return &steering->fdb_root_ns->ns;
2185 		return NULL;
2186 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2187 		if (steering->sniffer_rx_root_ns)
2188 			return &steering->sniffer_rx_root_ns->ns;
2189 		return NULL;
2190 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2191 		if (steering->sniffer_tx_root_ns)
2192 			return &steering->sniffer_tx_root_ns->ns;
2193 		return NULL;
2194 	default:
2195 		break;
2196 	}
2197 
2198 	if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2199 	    type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2200 		root_ns = steering->egress_root_ns;
2201 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2202 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2203 		root_ns = steering->rdma_rx_root_ns;
2204 		prio = RDMA_RX_BYPASS_PRIO;
2205 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2206 		root_ns = steering->rdma_rx_root_ns;
2207 		prio = RDMA_RX_KERNEL_PRIO;
2208 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2209 		root_ns = steering->rdma_tx_root_ns;
2210 	} else { /* Must be NIC RX */
2211 		root_ns = steering->root_ns;
2212 		prio = type;
2213 	}
2214 
2215 	if (!root_ns)
2216 		return NULL;
2217 
2218 	fs_prio = find_prio(&root_ns->ns, prio);
2219 	if (!fs_prio)
2220 		return NULL;
2221 
2222 	ns = list_first_entry(&fs_prio->node.children,
2223 			      typeof(*ns),
2224 			      node.list);
2225 
2226 	return ns;
2227 }
2228 EXPORT_SYMBOL(mlx5_get_flow_namespace);
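
/* Usage sketch (illustrative only): resolve a namespace and create a
 * table inside it, mirroring what create_anchor_flow_table() does below.
 * The KERNEL namespace choice and the table sizing are assumptions made
 * for the example.
 */
static struct mlx5_flow_table *__maybe_unused
example_create_kernel_ft(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	ft_attr.prio = 0;	/* first priority within the namespace */
	ft_attr.max_fte = 64;	/* room for 64 flow table entries */

	return mlx5_create_flow_table(ns, &ft_attr);
}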
2229 
2230 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2231 							      enum mlx5_flow_namespace_type type,
2232 							      int vport)
2233 {
2234 	struct mlx5_flow_steering *steering = dev->priv.steering;
2235 
2236 	if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
2237 		return NULL;
2238 
2239 	switch (type) {
2240 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2241 		if (steering->esw_egress_root_ns &&
2242 		    steering->esw_egress_root_ns[vport])
2243 			return &steering->esw_egress_root_ns[vport]->ns;
2244 		else
2245 			return NULL;
2246 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2247 		if (steering->esw_ingress_root_ns &&
2248 		    steering->esw_ingress_root_ns[vport])
2249 			return &steering->esw_ingress_root_ns[vport]->ns;
2250 		else
2251 			return NULL;
2252 	default:
2253 		return NULL;
2254 	}
2255 }
2256 
2257 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2258 				       unsigned int prio,
2259 				       int num_levels,
2260 				       enum fs_node_type type)
2261 {
2262 	struct fs_prio *fs_prio;
2263 
2264 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2265 	if (!fs_prio)
2266 		return ERR_PTR(-ENOMEM);
2267 
2268 	fs_prio->node.type = type;
2269 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2270 	tree_add_node(&fs_prio->node, &ns->node);
2271 	fs_prio->num_levels = num_levels;
2272 	fs_prio->prio = prio;
2273 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2274 
2275 	return fs_prio;
2276 }
2277 
2278 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2279 					      unsigned int prio,
2280 					      int num_levels)
2281 {
2282 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2283 }
2284 
2285 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2286 				      unsigned int prio, int num_levels)
2287 {
2288 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2289 }
2290 
2291 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2292 						     *ns)
2293 {
2294 	ns->node.type = FS_TYPE_NAMESPACE;
2295 
2296 	return ns;
2297 }
2298 
2299 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2300 						       int def_miss_act)
2301 {
2302 	struct mlx5_flow_namespace	*ns;
2303 
2304 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2305 	if (!ns)
2306 		return ERR_PTR(-ENOMEM);
2307 
2308 	fs_init_namespace(ns);
2309 	ns->def_miss_action = def_miss_act;
2310 	tree_init_node(&ns->node, NULL, del_sw_ns);
2311 	tree_add_node(&ns->node, &prio->node);
2312 	list_add_tail(&ns->node.list, &prio->node.children);
2313 
2314 	return ns;
2315 }
2316 
2317 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2318 			     struct init_tree_node *prio_metadata)
2319 {
2320 	struct fs_prio *fs_prio;
2321 	int i;
2322 
2323 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2324 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2325 		if (IS_ERR(fs_prio))
2326 			return PTR_ERR(fs_prio);
2327 	}
2328 	return 0;
2329 }
2330 
2331 #define FLOW_TABLE_BIT_SZ 1
2332 #define GET_FLOW_TABLE_CAP(dev, offset) \
2333 	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
2334 			offset / 32)) >>					\
2335 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
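/* Editor's note on the bit math above: GET_FLOW_TABLE_CAP reads one
 * capability bit out of the raw big-endian HCA capability array.
 * "offset / 32" picks the 32-bit word, and the right shift by
 * (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f)) moves the requested bit
 * (counted from the word's MSB, as laid out by firmware) down to bit 0
 * before masking.
 */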
2336 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2337 {
2338 	int i;
2339 
2340 	for (i = 0; i < caps->arr_sz; i++) {
2341 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2342 			return false;
2343 	}
2344 	return true;
2345 }
2346 
2347 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2348 				    struct init_tree_node *init_node,
2349 				    struct fs_node *fs_parent_node,
2350 				    struct init_tree_node *init_parent_node,
2351 				    int prio)
2352 {
2353 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2354 					      flow_table_properties_nic_receive.
2355 					      max_ft_level);
2356 	struct mlx5_flow_namespace *fs_ns;
2357 	struct fs_prio *fs_prio;
2358 	struct fs_node *base;
2359 	int i;
2360 	int err;
2361 
2362 	if (init_node->type == FS_TYPE_PRIO) {
2363 		if ((init_node->min_ft_level > max_ft_level) ||
2364 		    !has_required_caps(steering->dev, &init_node->caps))
2365 			return 0;
2366 
2367 		fs_get_obj(fs_ns, fs_parent_node);
2368 		if (init_node->num_leaf_prios)
2369 			return create_leaf_prios(fs_ns, prio, init_node);
2370 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2371 		if (IS_ERR(fs_prio))
2372 			return PTR_ERR(fs_prio);
2373 		base = &fs_prio->node;
2374 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2375 		fs_get_obj(fs_prio, fs_parent_node);
2376 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2377 		if (IS_ERR(fs_ns))
2378 			return PTR_ERR(fs_ns);
2379 		base = &fs_ns->node;
2380 	} else {
2381 		return -EINVAL;
2382 	}
2383 	prio = 0;
2384 	for (i = 0; i < init_node->ar_size; i++) {
2385 		err = init_root_tree_recursive(steering, &init_node->children[i],
2386 					       base, init_node, prio);
2387 		if (err)
2388 			return err;
2389 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2390 		    init_node->children[i].num_leaf_prios) {
2391 			prio += init_node->children[i].num_leaf_prios;
2392 		}
2393 	}
2394 
2395 	return 0;
2396 }
2397 
2398 static int init_root_tree(struct mlx5_flow_steering *steering,
2399 			  struct init_tree_node *init_node,
2400 			  struct fs_node *fs_parent_node)
2401 {
2402 	int i;
2403 	struct mlx5_flow_namespace *fs_ns;
2404 	int err;
2405 
2406 	fs_get_obj(fs_ns, fs_parent_node);
2407 	for (i = 0; i < init_node->ar_size; i++) {
2408 		err = init_root_tree_recursive(steering, &init_node->children[i],
2409 					       &fs_ns->node,
2410 					       init_node, i);
2411 		if (err)
2412 			return err;
2413 	}
2414 	return 0;
2415 }
2416 
2417 static void del_sw_root_ns(struct fs_node *node)
2418 {
2419 	struct mlx5_flow_root_namespace *root_ns;
2420 	struct mlx5_flow_namespace *ns;
2421 
2422 	fs_get_obj(ns, node);
2423 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2424 	mutex_destroy(&root_ns->chain_lock);
2425 	kfree(node);
2426 }
2427 
2428 static struct mlx5_flow_root_namespace
2429 *create_root_ns(struct mlx5_flow_steering *steering,
2430 		enum fs_flow_table_type table_type)
2431 {
2432 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2433 	struct mlx5_flow_root_namespace *root_ns;
2434 	struct mlx5_flow_namespace *ns;
2435 
2436 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2437 	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2438 		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2439 
2440 	/* Create the root namespace */
2441 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2442 	if (!root_ns)
2443 		return NULL;
2444 
2445 	root_ns->dev = steering->dev;
2446 	root_ns->table_type = table_type;
2447 	root_ns->cmds = cmds;
2448 
2449 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2450 
2451 	ns = &root_ns->ns;
2452 	fs_init_namespace(ns);
2453 	mutex_init(&root_ns->chain_lock);
2454 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2455 	tree_add_node(&ns->node, NULL);
2456 
2457 	return root_ns;
2458 }
2459 
2460 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2461 
2462 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2463 {
2464 	struct fs_prio *prio;
2465 
2466 	fs_for_each_prio(prio, ns) {
2467 		 /* This updates prio start_level and num_levels */
2468 		set_prio_attrs_in_prio(prio, acc_level);
2469 		acc_level += prio->num_levels;
2470 	}
2471 	return acc_level;
2472 }
2473 
2474 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2475 {
2476 	struct mlx5_flow_namespace *ns;
2477 	int acc_level_ns = acc_level;
2478 
2479 	prio->start_level = acc_level;
2480 	fs_for_each_ns(ns, prio) {
2481 		/* This updates start_level and num_levels of ns's priority descendants */
2482 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2483 
2484 		/* If this is a prio with chains, we can jump from one chain
2485 		 * (namespace) to another, so we accumulate the levels.
2486 		 */
2487 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2488 			acc_level = acc_level_ns;
2489 	}
2490 
2491 	if (!prio->num_levels)
2492 		prio->num_levels = acc_level_ns - prio->start_level;
2493 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2494 }
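
/* Worked example of the accumulation above: a FS_TYPE_PRIO_CHAINS prio
 * holding two namespaces of two levels each and starting at acc_level N
 * gives the first namespace levels N..N+1 and, because the levels
 * accumulate across chains, the second namespace levels N+2..N+3, so
 * prio->num_levels ends up as 4.  For a plain FS_TYPE_PRIO, both
 * namespaces would share levels N..N+1 and num_levels would stay 2.
 */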
2495 
2496 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2497 {
2498 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2499 	struct fs_prio *prio;
2500 	int start_level = 0;
2501 
2502 	fs_for_each_prio(prio, ns) {
2503 		set_prio_attrs_in_prio(prio, start_level);
2504 		start_level += prio->num_levels;
2505 	}
2506 }
2507 
2508 #define ANCHOR_PRIO 0
2509 #define ANCHOR_SIZE 1
2510 #define ANCHOR_LEVEL 0
2511 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2512 {
2513 	struct mlx5_flow_namespace *ns = NULL;
2514 	struct mlx5_flow_table_attr ft_attr = {};
2515 	struct mlx5_flow_table *ft;
2516 
2517 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2518 	if (WARN_ON(!ns))
2519 		return -EINVAL;
2520 
2521 	ft_attr.max_fte = ANCHOR_SIZE;
2522 	ft_attr.level   = ANCHOR_LEVEL;
2523 	ft_attr.prio    = ANCHOR_PRIO;
2524 
2525 	ft = mlx5_create_flow_table(ns, &ft_attr);
2526 	if (IS_ERR(ft)) {
2527 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2528 		return PTR_ERR(ft);
2529 	}
2530 	return 0;
2531 }
2532 
2533 static int init_root_ns(struct mlx5_flow_steering *steering)
2534 {
2535 	int err;
2536 
2537 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2538 	if (!steering->root_ns)
2539 		return -ENOMEM;
2540 
2541 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2542 	if (err)
2543 		goto out_err;
2544 
2545 	set_prio_attrs(steering->root_ns);
2546 	err = create_anchor_flow_table(steering);
2547 	if (err)
2548 		goto out_err;
2549 
2550 	return 0;
2551 
2552 out_err:
2553 	cleanup_root_ns(steering->root_ns);
2554 	steering->root_ns = NULL;
2555 	return err;
2556 }
2557 
2558 static void clean_tree(struct fs_node *node)
2559 {
2560 	if (node) {
2561 		struct fs_node *iter;
2562 		struct fs_node *temp;
2563 
2564 		tree_get_node(node);
2565 		list_for_each_entry_safe(iter, temp, &node->children, list)
2566 			clean_tree(iter);
2567 		tree_put_node(node, false);
2568 		tree_remove_node(node, false);
2569 	}
2570 }
2571 
2572 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2573 {
2574 	if (!root_ns)
2575 		return;
2576 
2577 	clean_tree(&root_ns->ns.node);
2578 }
2579 
2580 static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
2581 {
2582 	struct mlx5_flow_steering *steering = dev->priv.steering;
2583 	int i;
2584 
2585 	if (!steering->esw_egress_root_ns)
2586 		return;
2587 
2588 	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
2589 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
2590 
2591 	kfree(steering->esw_egress_root_ns);
2592 	steering->esw_egress_root_ns = NULL;
2593 }
2594 
2595 static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2596 {
2597 	struct mlx5_flow_steering *steering = dev->priv.steering;
2598 	int i;
2599 
2600 	if (!steering->esw_ingress_root_ns)
2601 		return;
2602 
2603 	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
2604 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2605 
2606 	kfree(steering->esw_ingress_root_ns);
2607 	steering->esw_ingress_root_ns = NULL;
2608 }
2609 
2610 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2611 {
2612 	struct mlx5_flow_steering *steering = dev->priv.steering;
2613 
2614 	cleanup_root_ns(steering->root_ns);
2615 	cleanup_egress_acls_root_ns(dev);
2616 	cleanup_ingress_acls_root_ns(dev);
2617 	cleanup_root_ns(steering->fdb_root_ns);
2618 	steering->fdb_root_ns = NULL;
2619 	kfree(steering->fdb_sub_ns);
2620 	steering->fdb_sub_ns = NULL;
2621 	cleanup_root_ns(steering->sniffer_rx_root_ns);
2622 	cleanup_root_ns(steering->sniffer_tx_root_ns);
2623 	cleanup_root_ns(steering->rdma_rx_root_ns);
2624 	cleanup_root_ns(steering->rdma_tx_root_ns);
2625 	cleanup_root_ns(steering->egress_root_ns);
2626 	mlx5_cleanup_fc_stats(dev);
2627 	kmem_cache_destroy(steering->ftes_cache);
2628 	kmem_cache_destroy(steering->fgs_cache);
2629 	kfree(steering);
2630 }
2631 
2632 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2633 {
2634 	struct fs_prio *prio;
2635 
2636 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2637 	if (!steering->sniffer_tx_root_ns)
2638 		return -ENOMEM;
2639 
2640 	/* Create single prio */
2641 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2642 	return PTR_ERR_OR_ZERO(prio);
2643 }
2644 
2645 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2646 {
2647 	struct fs_prio *prio;
2648 
2649 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2650 	if (!steering->sniffer_rx_root_ns)
2651 		return -ENOMEM;
2652 
2653 	/* Create single prio */
2654 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2655 	return PTR_ERR_OR_ZERO(prio);
2656 }
2657 
2658 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2659 {
2660 	int err;
2661 
2662 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2663 	if (!steering->rdma_rx_root_ns)
2664 		return -ENOMEM;
2665 
2666 	err = init_root_tree(steering, &rdma_rx_root_fs,
2667 			     &steering->rdma_rx_root_ns->ns.node);
2668 	if (err)
2669 		goto out_err;
2670 
2671 	set_prio_attrs(steering->rdma_rx_root_ns);
2672 
2673 	return 0;
2674 
2675 out_err:
2676 	cleanup_root_ns(steering->rdma_rx_root_ns);
2677 	steering->rdma_rx_root_ns = NULL;
2678 	return err;
2679 }
2680 
2681 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2682 {
2683 	int err;
2684 
2685 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2686 	if (!steering->rdma_tx_root_ns)
2687 		return -ENOMEM;
2688 
2689 	err = init_root_tree(steering, &rdma_tx_root_fs,
2690 			     &steering->rdma_tx_root_ns->ns.node);
2691 	if (err)
2692 		goto out_err;
2693 
2694 	set_prio_attrs(steering->rdma_tx_root_ns);
2695 
2696 	return 0;
2697 
2698 out_err:
2699 	cleanup_root_ns(steering->rdma_tx_root_ns);
2700 	steering->rdma_tx_root_ns = NULL;
2701 	return err;
2702 }
2703 
2704 /* FT and tc chains are stored in the same array so we can reuse the
2705  * mlx5_get_fdb_sub_ns() and tc API for FT chains.
2706  * When creating a new ns for each chain, store it in the first available slot.
2707  * Assume tc chains are created and stored first and only then the FT chain.
2708  */
2709 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2710 					struct mlx5_flow_namespace *ns)
2711 {
2712 	int chain = 0;
2713 
2714 	while (steering->fdb_sub_ns[chain])
2715 		++chain;
2716 
2717 	steering->fdb_sub_ns[chain] = ns;
2718 }
2719 
2720 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2721 					struct fs_prio *maj_prio)
2722 {
2723 	struct mlx5_flow_namespace *ns;
2724 	struct fs_prio *min_prio;
2725 	int prio;
2726 
2727 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2728 	if (IS_ERR(ns))
2729 		return PTR_ERR(ns);
2730 
2731 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2732 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2733 		if (IS_ERR(min_prio))
2734 			return PTR_ERR(min_prio);
2735 	}
2736 
2737 	store_fdb_sub_ns_prio_chain(steering, ns);
2738 
2739 	return 0;
2740 }
2741 
2742 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2743 			     int fs_prio,
2744 			     int chains)
2745 {
2746 	struct fs_prio *maj_prio;
2747 	int levels;
2748 	int chain;
2749 	int err;
2750 
2751 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2752 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2753 					  fs_prio,
2754 					  levels);
2755 	if (IS_ERR(maj_prio))
2756 		return PTR_ERR(maj_prio);
2757 
2758 	for (chain = 0; chain < chains; chain++) {
2759 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2760 		if (err)
2761 			return err;
2762 	}
2763 
2764 	return 0;
2765 }
2766 
2767 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2768 {
2769 	int err;
2770 
2771 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2772 				       sizeof(*steering->fdb_sub_ns),
2773 				       GFP_KERNEL);
2774 	if (!steering->fdb_sub_ns)
2775 		return -ENOMEM;
2776 
2777 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2778 	if (err)
2779 		return err;
2780 
2781 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2782 	if (err)
2783 		return err;
2784 
2785 	return 0;
2786 }
2787 
2788 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2789 {
2790 	struct fs_prio *maj_prio;
2791 	int err;
2792 
2793 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2794 	if (!steering->fdb_root_ns)
2795 		return -ENOMEM;
2796 
2797 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2798 				  1);
2799 	if (IS_ERR(maj_prio)) {
2800 		err = PTR_ERR(maj_prio);
2801 		goto out_err;
2802 	}
2803 	err = create_fdb_fast_path(steering);
2804 	if (err)
2805 		goto out_err;
2806 
2807 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2808 	if (IS_ERR(maj_prio)) {
2809 		err = PTR_ERR(maj_prio);
2810 		goto out_err;
2811 	}
2812 
2813 	/* We put this priority last, knowing that nothing will get here
2814 	 * unless explicitly forwarded to. This is possible because the
2815 	 * slow path tables have catch-all rules and nothing gets past
2816 	 * those tables.
2817 	 */
2818 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2819 	if (IS_ERR(maj_prio)) {
2820 		err = PTR_ERR(maj_prio);
2821 		goto out_err;
2822 	}
2823 
2824 	set_prio_attrs(steering->fdb_root_ns);
2825 	return 0;
2826 
2827 out_err:
2828 	cleanup_root_ns(steering->fdb_root_ns);
2829 	kfree(steering->fdb_sub_ns);
2830 	steering->fdb_sub_ns = NULL;
2831 	steering->fdb_root_ns = NULL;
2832 	return err;
2833 }
2834 
2835 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2836 {
2837 	struct fs_prio *prio;
2838 
2839 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2840 	if (!steering->esw_egress_root_ns[vport])
2841 		return -ENOMEM;
2842 
2843 	/* Create single prio */
2844 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2845 	return PTR_ERR_OR_ZERO(prio);
2846 }
2847 
2848 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2849 {
2850 	struct fs_prio *prio;
2851 
2852 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2853 	if (!steering->esw_ingress_root_ns[vport])
2854 		return -ENOMEM;
2855 
2856 	/* create 1 prio*/
2857 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2858 	return PTR_ERR_OR_ZERO(prio);
2859 }
2860 
2861 static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
2862 {
2863 	struct mlx5_flow_steering *steering = dev->priv.steering;
2864 	int total_vports = mlx5_eswitch_get_total_vports(dev);
2865 	int err;
2866 	int i;
2867 
2868 	steering->esw_egress_root_ns =
2869 			kcalloc(total_vports,
2870 				sizeof(*steering->esw_egress_root_ns),
2871 				GFP_KERNEL);
2872 	if (!steering->esw_egress_root_ns)
2873 		return -ENOMEM;
2874 
2875 	for (i = 0; i < total_vports; i++) {
2876 		err = init_egress_acl_root_ns(steering, i);
2877 		if (err)
2878 			goto cleanup_root_ns;
2879 	}
2880 
2881 	return 0;
2882 
2883 cleanup_root_ns:
2884 	for (i--; i >= 0; i--)
2885 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
2886 	kfree(steering->esw_egress_root_ns);
2887 	steering->esw_egress_root_ns = NULL;
2888 	return err;
2889 }
2890 
2891 static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2892 {
2893 	struct mlx5_flow_steering *steering = dev->priv.steering;
2894 	int total_vports = mlx5_eswitch_get_total_vports(dev);
2895 	int err;
2896 	int i;
2897 
2898 	steering->esw_ingress_root_ns =
2899 			kcalloc(total_vports,
2900 				sizeof(*steering->esw_ingress_root_ns),
2901 				GFP_KERNEL);
2902 	if (!steering->esw_ingress_root_ns)
2903 		return -ENOMEM;
2904 
2905 	for (i = 0; i < total_vports; i++) {
2906 		err = init_ingress_acl_root_ns(steering, i);
2907 		if (err)
2908 			goto cleanup_root_ns;
2909 	}
2910 
2911 	return 0;
2912 
2913 cleanup_root_ns:
2914 	for (i--; i >= 0; i--)
2915 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2916 	kfree(steering->esw_ingress_root_ns);
2917 	steering->esw_ingress_root_ns = NULL;
2918 	return err;
2919 }
2920 
2921 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2922 {
2923 	int err;
2924 
2925 	steering->egress_root_ns = create_root_ns(steering,
2926 						  FS_FT_NIC_TX);
2927 	if (!steering->egress_root_ns)
2928 		return -ENOMEM;
2929 
2930 	err = init_root_tree(steering, &egress_root_fs,
2931 			     &steering->egress_root_ns->ns.node);
2932 	if (err)
2933 		goto cleanup;
2934 	set_prio_attrs(steering->egress_root_ns);
2935 	return 0;
2936 cleanup:
2937 	cleanup_root_ns(steering->egress_root_ns);
2938 	steering->egress_root_ns = NULL;
2939 	return err;
2940 }
2941 
2942 int mlx5_init_fs(struct mlx5_core_dev *dev)
2943 {
2944 	struct mlx5_flow_steering *steering;
2945 	int err = 0;
2946 
2947 	err = mlx5_init_fc_stats(dev);
2948 	if (err)
2949 		return err;
2950 
2951 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2952 	if (!steering)
2953 		return -ENOMEM;
2954 	steering->dev = dev;
2955 	dev->priv.steering = steering;
2956 
2957 	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
2958 						sizeof(struct mlx5_flow_group), 0,
2959 						0, NULL);
2960 	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
2961 						 0, NULL);
2962 	if (!steering->ftes_cache || !steering->fgs_cache) {
2963 		err = -ENOMEM;
2964 		goto err;
2965 	}
2966 
2967 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
2968 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
2969 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
2970 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
2971 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
2972 		err = init_root_ns(steering);
2973 		if (err)
2974 			goto err;
2975 	}
2976 
2977 	if (MLX5_ESWITCH_MANAGER(dev)) {
2978 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2979 			err = init_fdb_root_ns(steering);
2980 			if (err)
2981 				goto err;
2982 		}
2983 		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
2984 			err = init_egress_acls_root_ns(dev);
2985 			if (err)
2986 				goto err;
2987 		}
2988 		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
2989 			err = init_ingress_acls_root_ns(dev);
2990 			if (err)
2991 				goto err;
2992 		}
2993 	}
2994 
2995 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
2996 		err = init_sniffer_rx_root_ns(steering);
2997 		if (err)
2998 			goto err;
2999 	}
3000 
3001 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3002 		err = init_sniffer_tx_root_ns(steering);
3003 		if (err)
3004 			goto err;
3005 	}
3006 
3007 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3008 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3009 		err = init_rdma_rx_root_ns(steering);
3010 		if (err)
3011 			goto err;
3012 	}
3013 
3014 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3015 		err = init_rdma_tx_root_ns(steering);
3016 		if (err)
3017 			goto err;
3018 	}
3019 
3020 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3021 	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3022 		err = init_egress_root_ns(steering);
3023 		if (err)
3024 			goto err;
3025 	}
3026 
3027 	return 0;
3028 err:
3029 	mlx5_cleanup_fs(dev);
3030 	return err;
3031 }
3032 
3033 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3034 {
3035 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3036 	struct mlx5_ft_underlay_qp *new_uqp;
3037 	int err = 0;
3038 
3039 	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3040 	if (!new_uqp)
3041 		return -ENOMEM;
3042 
3043 	mutex_lock(&root->chain_lock);
3044 
3045 	if (!root->root_ft) {
3046 		err = -EINVAL;
3047 		goto update_ft_fail;
3048 	}
3049 
3050 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3051 					 false);
3052 	if (err) {
3053 		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3054 			       underlay_qpn, err);
3055 		goto update_ft_fail;
3056 	}
3057 
3058 	new_uqp->qpn = underlay_qpn;
3059 	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3060 
3061 	mutex_unlock(&root->chain_lock);
3062 
3063 	return 0;
3064 
3065 update_ft_fail:
3066 	mutex_unlock(&root->chain_lock);
3067 	kfree(new_uqp);
3068 	return err;
3069 }
3070 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3071 
3072 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3073 {
3074 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3075 	struct mlx5_ft_underlay_qp *uqp;
3076 	bool found = false;
3077 	int err = 0;
3078 
3079 	mutex_lock(&root->chain_lock);
3080 	list_for_each_entry(uqp, &root->underlay_qpns, list) {
3081 		if (uqp->qpn == underlay_qpn) {
3082 			found = true;
3083 			break;
3084 		}
3085 	}
3086 
3087 	if (!found) {
3088 		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3089 			       underlay_qpn);
3090 		err = -EINVAL;
3091 		goto out;
3092 	}
3093 
3094 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3095 					 true);
3096 	if (err)
3097 		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3098 			       underlay_qpn, err);
3099 
3100 	list_del(&uqp->list);
3101 	mutex_unlock(&root->chain_lock);
3102 	kfree(uqp);
3103 
3104 	return 0;
3105 
3106 out:
3107 	mutex_unlock(&root->chain_lock);
3108 	return err;
3109 }
3110 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
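
/* Usage sketch (illustrative only): pair the add/remove calls around the
 * lifetime of an underlay QP, e.g. for IPoIB.
 */
static int __maybe_unused example_underlay_qpn(struct mlx5_core_dev *dev,
					       u32 qpn)
{
	int err;

	err = mlx5_fs_add_rx_underlay_qpn(dev, qpn);
	if (err)
		return err;

	/* ... RX steering now targets the underlay QP ... */

	return mlx5_fs_remove_rx_underlay_qpn(dev, qpn);
}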
3111 
3112 static struct mlx5_flow_root_namespace
3113 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3114 {
3115 	struct mlx5_flow_namespace *ns;
3116 
3117 	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3118 	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3119 		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3120 	else
3121 		ns = mlx5_get_flow_namespace(dev, ns_type);
3122 	if (!ns)
3123 		return NULL;
3124 
3125 	return find_root(&ns->node);
3126 }
3127 
3128 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3129 						 u8 ns_type, u8 num_actions,
3130 						 void *modify_actions)
3131 {
3132 	struct mlx5_flow_root_namespace *root;
3133 	struct mlx5_modify_hdr *modify_hdr;
3134 	int err;
3135 
3136 	root = get_root_namespace(dev, ns_type);
3137 	if (!root)
3138 		return ERR_PTR(-EOPNOTSUPP);
3139 
3140 	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3141 	if (!modify_hdr)
3142 		return ERR_PTR(-ENOMEM);
3143 
3144 	modify_hdr->ns_type = ns_type;
3145 	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3146 					      modify_actions, modify_hdr);
3147 	if (err) {
3148 		kfree(modify_hdr);
3149 		return ERR_PTR(err);
3150 	}
3151 
3152 	return modify_hdr;
3153 }
3154 EXPORT_SYMBOL(mlx5_modify_header_alloc);
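
/* Usage sketch (illustrative only): allocate a modify-header context
 * with a single set-action that rewrites the outer IPv4 TTL to 64.  The
 * field and namespace choices are assumptions made for the example; see
 * set_action_in in mlx5_ifc.h for the available fields.
 */
static struct mlx5_modify_hdr *__maybe_unused
example_set_ttl_modify_hdr(struct mlx5_core_dev *dev)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, length, 8);	/* TTL is 8 bits wide */
	MLX5_SET(set_action_in, action, data, 64);	/* new TTL value */

	return mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					1, action);
}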
3155 
3156 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3157 				struct mlx5_modify_hdr *modify_hdr)
3158 {
3159 	struct mlx5_flow_root_namespace *root;
3160 
3161 	root = get_root_namespace(dev, modify_hdr->ns_type);
3162 	if (WARN_ON(!root))
3163 		return;
3164 	root->cmds->modify_header_dealloc(root, modify_hdr);
3165 	kfree(modify_hdr);
3166 }
3167 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3168 
3169 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3170 						     int reformat_type,
3171 						     size_t size,
3172 						     void *reformat_data,
3173 						     enum mlx5_flow_namespace_type ns_type)
3174 {
3175 	struct mlx5_pkt_reformat *pkt_reformat;
3176 	struct mlx5_flow_root_namespace *root;
3177 	int err;
3178 
3179 	root = get_root_namespace(dev, ns_type);
3180 	if (!root)
3181 		return ERR_PTR(-EOPNOTSUPP);
3182 
3183 	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3184 	if (!pkt_reformat)
3185 		return ERR_PTR(-ENOMEM);
3186 
3187 	pkt_reformat->ns_type = ns_type;
3188 	pkt_reformat->reformat_type = reformat_type;
3189 	err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
3190 						reformat_data, ns_type,
3191 						pkt_reformat);
3192 	if (err) {
3193 		kfree(pkt_reformat);
3194 		return ERR_PTR(err);
3195 	}
3196 
3197 	return pkt_reformat;
3198 }
3199 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
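
/* Usage sketch (illustrative only): allocate a VXLAN encap context.  The
 * 50-byte header (ETH + IPv4 + UDP + VXLAN) is left zeroed here purely
 * for illustration; a real caller fills it in before this call.
 */
static struct mlx5_pkt_reformat *__maybe_unused
example_vxlan_encap(struct mlx5_core_dev *dev)
{
	u8 encap_header[50] = {};

	return mlx5_packet_reformat_alloc(dev,
					  MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					  sizeof(encap_header), encap_header,
					  MLX5_FLOW_NAMESPACE_FDB);
}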
3200 
3201 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3202 				  struct mlx5_pkt_reformat *pkt_reformat)
3203 {
3204 	struct mlx5_flow_root_namespace *root;
3205 
3206 	root = get_root_namespace(dev, pkt_reformat->ns_type);
3207 	if (WARN_ON(!root))
3208 		return;
3209 	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3210 	kfree(pkt_reformat);
3211 }
3212 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3213 
3214 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3215 				 struct mlx5_flow_root_namespace *peer_ns)
3216 {
3217 	if (peer_ns && ns->mode != peer_ns->mode) {
3218 		mlx5_core_err(ns->dev,
3219 			      "Can't peer namespace of different steering mode\n");
3220 		return -EINVAL;
3221 	}
3222 
3223 	return ns->cmds->set_peer(ns, peer_ns);
3224 }
3225 
3226 /* This function should be called only at the init stage of the namespace.
3227  * It is not safe to call it while steering operations are in progress
3228  * in the namespace.
3229  */
3230 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3231 				 enum mlx5_flow_steering_mode mode)
3232 {
3233 	struct mlx5_flow_root_namespace *root;
3234 	const struct mlx5_flow_cmds *cmds;
3235 	int err;
3236 
3237 	root = find_root(&ns->node);
3238 	/* Can't set cmds on a non-root namespace */
3239 	if (&root->ns != ns)
3240 		return -EINVAL;
3241 
3242 	if (root->table_type != FS_FT_FDB)
3243 		return -EOPNOTSUPP;
3244 
3245 	if (root->mode == mode)
3246 		return 0;
3247 
3248 	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3249 		cmds = mlx5_fs_cmd_get_dr_cmds();
3250 	else
3251 		cmds = mlx5_fs_cmd_get_fw_cmds();
3252 	if (!cmds)
3253 		return -EOPNOTSUPP;
3254 
3255 	err = cmds->create_ns(root);
3256 	if (err) {
3257 		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3258 			      err);
3259 		return err;
3260 	}
3261 
3262 	root->cmds->destroy_ns(root);
3263 	root->cmds = cmds;
3264 	root->mode = mode;
3265 
3266 	return 0;
3267 }
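
/* Usage sketch (illustrative only): switch the FDB root namespace to
 * software-managed steering (SMFS) before any tables are created in it.
 */
static int __maybe_unused example_enable_smfs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns)
		return -EOPNOTSUPP;

	return mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
}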
3268