// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"

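/*
 * The TTC flow table is carved into three flow groups: group 1 holds the
 * L4 rules that match on ip_protocol (eight slots, BIT(3)) plus one slot
 * per tunnel traffic type, group 2 holds the L3-only IPv4/IPv6 rules, and
 * group 3 holds the single catch-all rule. The inner TTC table mirrors
 * this layout without the tunnel slots.
 */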
#define MLX5_TTC_NUM_GROUPS	3
#define MLX5_TTC_GROUP1_SIZE	(BIT(3) + MLX5_NUM_TUNNEL_TT)
#define MLX5_TTC_GROUP2_SIZE	BIT(1)
#define MLX5_TTC_GROUP3_SIZE	BIT(0)
#define MLX5_TTC_TABLE_SIZE	(MLX5_TTC_GROUP1_SIZE +\
				 MLX5_TTC_GROUP2_SIZE +\
				 MLX5_TTC_GROUP3_SIZE)

#define MLX5_INNER_TTC_NUM_GROUPS	3
#define MLX5_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5_INNER_TTC_TABLE_SIZE	(MLX5_INNER_TTC_GROUP1_SIZE +\
					 MLX5_INNER_TTC_GROUP2_SIZE +\
					 MLX5_INNER_TTC_GROUP3_SIZE)

/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};

struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}

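/* Delete every rule installed in the TTC table and clear the handles. */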
static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};

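/*
 * Per traffic type: the ethertype and IP protocol the rule matches on.
 * A zero proto (or etype) means that field is left unmatched.
 */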
static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

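/* Tunnel traffic types, matched on the outer headers of encapsulated flows. */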
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}

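/* Check whether the device can do RX stateless offload for this tunnel protocol. */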
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}

static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5_tunnel_proto_supported_rx(mdev,
						   ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

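/*
 * An inner TTC flow table is usable only if the device supports at least
 * one tunnel protocol on RX and can match on the inner IP version.
 */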
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}

static u8 mlx5_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

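/*
 * Build one TTC rule. Match on ip_version when the device supports it,
 * otherwise fall back to matching on the ethertype.
 */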
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dest, u16 etype, u8 proto)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_protocol, proto);
	}

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

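/*
 * Install one steering rule per traffic type, and, when the inner TTC
 * table is in use, one rule per supported tunnel traffic type. On any
 * failure all rules created so far are removed.
 */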
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

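/*
 * Create the three flow groups of the TTC table: an L4 group matching on
 * the IP protocol, an L3 group matching on the IP version (or ethertype
 * when ip_version matching is unsupported), and a match-all group.
 */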
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

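/* Build one inner-TTC rule, matching on the inner IP version and protocol. */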
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *ft,
			     struct mlx5_flow_destination *dest,
			     u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

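/*
 * Install one inner-header rule per traffic type. The inner table carries
 * no tunnel rules of its own.
 */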
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

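/*
 * Create the inner TTC flow groups: L4 (inner IP version + protocol),
 * L3 (inner IP version only) and a match-all group.
 */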
static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
			 GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

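/*
 * Create an inner TTC table: allocate the flow table, carve out its flow
 * groups, and populate the per-traffic-type rules.
 */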
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

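/* Tear down a TTC table: rules first, then flow groups, then the table. */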
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	kvfree(ttc);
}

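/*
 * Create the outer TTC table. The caller provides the namespace, table
 * attributes and per-traffic-type destinations through @params; max_fte
 * is set here and must not be pre-populated.
 */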
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

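/* Redirect the rule of the given traffic type to a new destination. */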
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}

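/*
 * Return the destination the rule was created with. The default dest is
 * recorded when the rule is installed and is expected to be a TIR.
 */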
struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

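/* Restore the rule of the given traffic type to its default destination. */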
int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
			      enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);

	return mlx5_ttc_fwd_dest(ttc, type, &dest);
}