/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef CONFIG_RFS_ACCEL

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

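/* The flow 5-tuple (ethertype, IP protocol, IP addresses and L4 ports)
 * that identifies an aRFS flow.
 */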
struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

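/* A single steered flow: its HW rule handle, the RX queue it is steered
 * to, and the IDs used to coordinate with the RPS expiry logic.
 */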
struct arfs_rule {
	struct mlx5e_priv       *priv;
	struct work_struct      arfs_work;
	struct mlx5_flow_handle *rule;
	struct hlist_node       hlist;
	int                     rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int                     flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int                     filter_id;
	struct arfs_tuple       tuple;
};

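/* Walk every aRFS rule across all tables and hash buckets; the _safe
 * iteration allows entries to be unlinked while walking.
 */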
#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

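/* Map an aRFS table type to the traffic type used to index the ttc
 * (traffic type classifier) rules and the RSS indirection TIRs.
 */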
static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5E_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5E_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5E_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5E_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

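/* Point the ttc steering rules back at the RSS indirection TIRs so that
 * incoming traffic bypasses the aRFS flow tables.
 */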
static int arfs_disable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_tir *tir = priv->indir_tir;
	int err = 0;
	int tt;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.tir_num = tir[i].tirn;
		tt = arfs_get_tt(i);
		/* Modify ttc rules destination to bypass the aRFS tables */
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed\n",
				   __func__);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_priv *priv);

int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	arfs_del_rules(priv);

	return arfs_disable(priv);
}

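/* Repoint the ttc rules at the per-type aRFS flow tables. On failure,
 * fall back to bypassing the aRFS tables entirely.
 */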
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	int err = 0;
	int tt;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
		tt = arfs_get_tt(i);
		/* Modify ttc rules destination to point at the aRFS flow tables */
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed err=%d\n",
				   __func__, err);
			arfs_disable(priv);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return;

	arfs_del_rules(priv);
	destroy_workqueue(priv->fs.arfs.wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
	}
}

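/* Install the catch-all rule (empty match spec) that forwards any packet
 * not matched by a 5-tuple rule to the traffic type's RSS TIR.
 */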
static int arfs_add_default_rule(struct mlx5e_priv *priv,
				 enum arfs_type type)
{
	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
	struct mlx5e_tir *tir = priv->indir_tir;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	enum mlx5e_traffic_types tt;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
			   __func__, type);
		err = -EINVAL;
		goto out;
	}

	dest.tir_num = tir[tt].tirn;

	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
			   __func__, type);
	}
out:
	kvfree(spec);
	return err;
}

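/* Group 1 holds the per-flow 5-tuple rules (up to 4K entries); group 2
 * holds the single catch-all default rule.
 */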
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	BIT(12)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
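/* Create the two match groups for an aRFS table: group 1 matches the
 * full 5-tuple for the given type; group 2 has no match criteria and
 * catches everything else for the default rule.
 */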
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kvfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

static int arfs_create_table(struct mlx5e_priv *priv,
			     enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(priv, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	int err = 0;
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return 0;

	spin_lock_init(&priv->fs.arfs.arfs_lock);
	INIT_LIST_HEAD(&priv->fs.arfs.rules);
	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!priv->fs.arfs.wq)
		return -ENOMEM;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(priv, i);
		if (err)
			goto err;
	}
	return 0;
err:
	mlx5e_arfs_destroy_tables(priv);
	return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

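/* Reclaim rules whose flows the RPS core says may expire, bounded by
 * MLX5E_ARFS_EXPIRY_QUOTA per invocation. Candidates are moved to a
 * local list under the lock and freed outside it.
 */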
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	int quota = 0;
	int i;
	int j;

	HLIST_HEAD(del_list);
	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule)
			mlx5_del_flow_rules(arfs_rule->rule);
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

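/* Unconditionally tear down every aRFS rule: detach all entries under
 * the lock, then cancel pending work and delete the HW rules outside it.
 */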
static void arfs_del_rules(struct mlx5e_priv *priv)
{
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	int i;
	int j;

	HLIST_HEAD(del_list);
	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

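/* Pick a hash bucket from the L4 source/destination ports: the two ports
 * are combined into a single key and hashed down to ARFS_HASH_SHIFT bits.
 */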
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

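/* Build a match spec from the rule's 5-tuple and install a HW flow rule
 * that steers matching packets to the direct TIR of the rule's RX queue.
 */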
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst;
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = priv->direct_tir[rxq].tirn;
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err)
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}

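/* Deferred work for a steering request: drop the rule if the netdev is
 * closed, add the HW rule on first use, or retarget an existing rule to
 * the flow's new RX queue. Also runs opportunistic expiry.
 */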
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5_flow_handle *rule;

	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&priv->fs.arfs.arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&priv->fs.arfs.arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

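/* Allocate and initialize a rule for the dissected flow keys and link it
 * into the table's hash. Called under arfs_lock from atomic context,
 * hence GFP_ATOMIC; the HW rule itself is added later from work context.
 */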
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule)
		return NULL;

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

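/* The ndo_rx_flow_steer callback (typically wired up elsewhere in the
 * driver's net_device_ops, roughly as
 * .ndo_rx_flow_steer = mlx5e_rx_flow_steer, under CONFIG_RFS_ACCEL).
 * Dissects the skb, finds or allocates a rule for the flow, and queues
 * work to program the HW. Returns the filter ID that the stack later
 * passes back via rps_may_expire_flow(), or a negative errno.
 */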
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_table *arfs_t;
	struct arfs_rule *arfs_rule;
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}
#endif