1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <net/flow_dissector.h>
34 #include <net/sch_generic.h>
35 #include <net/pkt_cls.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/device.h>
40 #include <linux/rhashtable.h>
41 #include <net/switchdev.h>
42 #include <net/tc_act/tc_mirred.h>
43 #include <net/tc_act/tc_vlan.h>
44 #include <net/tc_act/tc_tunnel_key.h>
45 #include <net/tc_act/tc_pedit.h>
46 #include <net/tc_act/tc_csum.h>
47 #include <net/vxlan.h>
48 #include <net/arp.h>
49 #include "en.h"
50 #include "en_rep.h"
51 #include "en_tc.h"
52 #include "eswitch.h"
53 #include "vxlan.h"
54
55 struct mlx5_nic_flow_attr {
56 u32 action;
57 u32 flow_tag;
58 u32 mod_hdr_id;
59 };
60
61 enum {
62 MLX5E_TC_FLOW_ESWITCH = BIT(0),
63 MLX5E_TC_FLOW_NIC = BIT(1),
64 MLX5E_TC_FLOW_OFFLOADED = BIT(2),
65 };
66
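/* One offloaded TC flow. The flow attributes live in the zero-length union
 * at the end of the struct; which member is meaningful depends on whether
 * the flow is offloaded to the eswitch FDB or to the NIC TC table
 * (see the MLX5E_TC_FLOW_* flags).
 */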
67 struct mlx5e_tc_flow {
68 struct rhash_head node;
69 u64 cookie;
70 u8 flags;
71 struct mlx5_flow_handle *rule;
72 struct list_head encap; /* flows sharing the same encap ID */
73 struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
74 union {
75 struct mlx5_esw_flow_attr esw_attr[0];
76 struct mlx5_nic_flow_attr nic_attr[0];
77 };
78 };
79
80 struct mlx5e_tc_flow_parse_attr {
81 struct ip_tunnel_info tun_info;
82 struct mlx5_flow_spec spec;
83 int num_mod_hdr_actions;
84 int max_mod_hdr_actions;
85 void *mod_hdr_actions;
86 int mirred_ifindex;
87 };
88
89 enum {
90 MLX5_HEADER_TYPE_VXLAN = 0x0,
91 MLX5_HEADER_TYPE_NVGRE = 0x1,
92 };
93
94 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
95 #define MLX5E_TC_TABLE_NUM_GROUPS 4
96
97 struct mod_hdr_key {
98 int num_actions;
99 void *actions;
100 };
101
102 struct mlx5e_mod_hdr_entry {
103 /* a node of a hash table which keeps all the mod_hdr entries */
104 struct hlist_node mod_hdr_hlist;
105
106 /* flows sharing the same mod_hdr entry */
107 struct list_head flows;
108
109 struct mod_hdr_key key;
110
111 u32 mod_hdr_id;
112 };
113
114 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
115
116 static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
117 {
118 return jhash(key->actions,
119 key->num_actions * MLX5_MH_ACT_SZ, 0);
120 }
121
122 static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
123 struct mod_hdr_key *b)
124 {
125 if (a->num_actions != b->num_actions)
126 return 1;
127
128 return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
129 }
130
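/* Attach a flow to a modify-header (pedit) context. Flows whose parsed
 * actions are byte-identical share a single HW mod_hdr ID: the entry is
 * looked up by hashing the packed action array, with separate hash tables
 * for FDB (eswitch) and NIC flows.
 */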
131 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
132 struct mlx5e_tc_flow *flow,
133 struct mlx5e_tc_flow_parse_attr *parse_attr)
134 {
135 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
136 int num_actions, actions_size, namespace, err;
137 struct mlx5e_mod_hdr_entry *mh;
138 struct mod_hdr_key key;
139 bool found = false;
140 u32 hash_key;
141
142 num_actions = parse_attr->num_mod_hdr_actions;
143 actions_size = MLX5_MH_ACT_SZ * num_actions;
144
145 key.actions = parse_attr->mod_hdr_actions;
146 key.num_actions = num_actions;
147
148 hash_key = hash_mod_hdr_info(&key);
149
150 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
151 namespace = MLX5_FLOW_NAMESPACE_FDB;
152 hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
153 mod_hdr_hlist, hash_key) {
154 if (!cmp_mod_hdr_info(&mh->key, &key)) {
155 found = true;
156 break;
157 }
158 }
159 } else {
160 namespace = MLX5_FLOW_NAMESPACE_KERNEL;
161 hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
162 mod_hdr_hlist, hash_key) {
163 if (!cmp_mod_hdr_info(&mh->key, &key)) {
164 found = true;
165 break;
166 }
167 }
168 }
169
170 if (found)
171 goto attach_flow;
172
173 mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
174 if (!mh)
175 return -ENOMEM;
176
177 mh->key.actions = (void *)mh + sizeof(*mh);
178 memcpy(mh->key.actions, key.actions, actions_size);
179 mh->key.num_actions = num_actions;
180 INIT_LIST_HEAD(&mh->flows);
181
182 err = mlx5_modify_header_alloc(priv->mdev, namespace,
183 mh->key.num_actions,
184 mh->key.actions,
185 &mh->mod_hdr_id);
186 if (err)
187 goto out_err;
188
189 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
190 hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
191 else
192 hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
193
194 attach_flow:
195 list_add(&flow->mod_hdr, &mh->flows);
196 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
197 flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
198 else
199 flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
200
201 return 0;
202
203 out_err:
204 kfree(mh);
205 return err;
206 }
207
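/* Remove a flow from its mod_hdr entry; once the last flow is gone, free
 * the HW modify-header context and the entry itself.
 */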
208 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
209 struct mlx5e_tc_flow *flow)
210 {
211 struct list_head *next = flow->mod_hdr.next;
212
213 list_del(&flow->mod_hdr);
214
215 if (list_empty(next)) {
216 struct mlx5e_mod_hdr_entry *mh;
217
218 mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
219
220 mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
221 hash_del(&mh->mod_hdr_hlist);
222 kfree(mh);
223 }
224 }
225
226 static struct mlx5_flow_handle *
227 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
228 struct mlx5e_tc_flow_parse_attr *parse_attr,
229 struct mlx5e_tc_flow *flow)
230 {
231 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
232 struct mlx5_core_dev *dev = priv->mdev;
233 struct mlx5_flow_destination dest = {};
234 struct mlx5_flow_act flow_act = {
235 .action = attr->action,
236 .flow_tag = attr->flow_tag,
237 .encap_id = 0,
238 };
239 struct mlx5_fc *counter = NULL;
240 struct mlx5_flow_handle *rule;
241 bool table_created = false;
242 int err;
243
244 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
245 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
246 dest.ft = priv->fs.vlan.ft.t;
247 } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
248 counter = mlx5_fc_create(dev, true);
249 if (IS_ERR(counter))
250 return ERR_CAST(counter);
251
252 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
253 dest.counter = counter;
254 }
255
256 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
257 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
258 flow_act.modify_id = attr->mod_hdr_id;
259 kfree(parse_attr->mod_hdr_actions);
260 if (err) {
261 rule = ERR_PTR(err);
262 goto err_create_mod_hdr_id;
263 }
264 }
265
266 if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
267 priv->fs.tc.t =
268 mlx5_create_auto_grouped_flow_table(priv->fs.ns,
269 MLX5E_TC_PRIO,
270 MLX5E_TC_TABLE_NUM_ENTRIES,
271 MLX5E_TC_TABLE_NUM_GROUPS,
272 0, 0);
273 if (IS_ERR(priv->fs.tc.t)) {
274 netdev_err(priv->netdev,
275 "Failed to create tc offload table\n");
276 rule = ERR_CAST(priv->fs.tc.t);
277 goto err_create_ft;
278 }
279
280 table_created = true;
281 }
282
283 parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
284 rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
285 &flow_act, &dest, 1);
286
287 if (IS_ERR(rule))
288 goto err_add_rule;
289
290 return rule;
291
292 err_add_rule:
293 if (table_created) {
294 mlx5_destroy_flow_table(priv->fs.tc.t);
295 priv->fs.tc.t = NULL;
296 }
297 err_create_ft:
298 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
299 mlx5e_detach_mod_hdr(priv, flow);
300 err_create_mod_hdr_id:
301 mlx5_fc_destroy(dev, counter);
302
303 return rule;
304 }
305
306 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
307 struct mlx5e_tc_flow *flow)
308 {
309 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
310 struct mlx5_fc *counter = NULL;
311
312 counter = mlx5_flow_rule_counter(flow->rule);
313 mlx5_del_flow_rules(flow->rule);
314 mlx5_fc_destroy(priv->mdev, counter);
315
316 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
317 mlx5_destroy_flow_table(priv->fs.tc.t);
318 priv->fs.tc.t = NULL;
319 }
320
321 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
322 mlx5e_detach_mod_hdr(priv, flow);
323 }
324
325 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
326 struct mlx5e_tc_flow *flow);
327
328 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
329 struct ip_tunnel_info *tun_info,
330 struct net_device *mirred_dev,
331 struct net_device **encap_dev,
332 struct mlx5e_tc_flow *flow);
333
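/* Offload a flow into the eswitch FDB. When the flow carries an encap action
 * and the tunnel neighbour isn't valid yet, mlx5e_attach_encap() returns
 * -EAGAIN and the rule is not installed here; it will be offloaded from
 * mlx5e_tc_encap_flows_add() once the neighbour is resolved.
 */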
334 static struct mlx5_flow_handle *
335 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
336 struct mlx5e_tc_flow_parse_attr *parse_attr,
337 struct mlx5e_tc_flow *flow)
338 {
339 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
340 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
341 struct net_device *out_dev, *encap_dev = NULL;
342 struct mlx5_flow_handle *rule = NULL;
343 struct mlx5e_rep_priv *rpriv;
344 struct mlx5e_priv *out_priv;
345 int err;
346
347 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
348 out_dev = __dev_get_by_index(dev_net(priv->netdev),
349 attr->parse_attr->mirred_ifindex);
350 err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
351 out_dev, &encap_dev, flow);
352 if (err) {
353 rule = ERR_PTR(err);
354 if (err != -EAGAIN)
355 goto err_attach_encap;
356 }
357 out_priv = netdev_priv(encap_dev);
358 rpriv = out_priv->ppriv;
359 attr->out_rep = rpriv->rep;
360 }
361
362 err = mlx5_eswitch_add_vlan_action(esw, attr);
363 if (err) {
364 rule = ERR_PTR(err);
365 goto err_add_vlan;
366 }
367
368 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
369 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
370 kfree(parse_attr->mod_hdr_actions);
371 if (err) {
372 rule = ERR_PTR(err);
373 goto err_mod_hdr;
374 }
375 }
376
377 /* we get here either when (1) there was no error so far (rule is still NULL)
378  * or (2) there's an encap action that returned -EAGAIN (no valid neigh yet)
379  */
380 if (rule != ERR_PTR(-EAGAIN)) {
381 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
382 if (IS_ERR(rule))
383 goto err_add_rule;
384 }
385 return rule;
386
387 err_add_rule:
388 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
389 mlx5e_detach_mod_hdr(priv, flow);
390 err_mod_hdr:
391 mlx5_eswitch_del_vlan_action(esw, attr);
392 err_add_vlan:
393 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
394 mlx5e_detach_encap(priv, flow);
395 err_attach_encap:
396 return rule;
397 }
398
399 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
400 struct mlx5e_tc_flow *flow)
401 {
402 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
403 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
404
405 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
406 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
407 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
408 }
409
410 mlx5_eswitch_del_vlan_action(esw, attr);
411
412 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
413 mlx5e_detach_encap(priv, flow);
414 kvfree(attr->parse_attr);
415 }
416
417 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
418 mlx5e_detach_mod_hdr(priv, flow);
419 }
420
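/* Called when a tunnel neighbour becomes valid: program the cached encap
 * header into HW and offload all the flows waiting on this encap entry.
 */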
421 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
422 struct mlx5e_encap_entry *e)
423 {
424 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
425 struct mlx5_esw_flow_attr *esw_attr;
426 struct mlx5e_tc_flow *flow;
427 int err;
428
429 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
430 e->encap_size, e->encap_header,
431 &e->encap_id);
432 if (err) {
433 mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
434 err);
435 return;
436 }
437 e->flags |= MLX5_ENCAP_ENTRY_VALID;
438 mlx5e_rep_queue_neigh_stats_work(priv);
439
440 list_for_each_entry(flow, &e->flows, encap) {
441 esw_attr = flow->esw_attr;
442 esw_attr->encap_id = e->encap_id;
443 flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
444 if (IS_ERR(flow->rule)) {
445 err = PTR_ERR(flow->rule);
446 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
447 err);
448 continue;
449 }
450 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
451 }
452 }
453
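/* Called when a tunnel neighbour becomes invalid: un-offload the rules using
 * this encap entry and release the HW encap context, keeping the software
 * state so the flows can be re-offloaded later.
 */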
454 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
455 struct mlx5e_encap_entry *e)
456 {
457 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
458 struct mlx5e_tc_flow *flow;
459
460 list_for_each_entry(flow, &e->flows, encap) {
461 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
462 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
463 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
464 }
465 }
466
467 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
468 e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
469 mlx5_encap_dealloc(priv->mdev, e->encap_id);
470 }
471 }
472
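/* Check whether any offloaded encap flow behind this neigh entry passed
 * traffic since the last check (using the flow counters' lastuse), and if so
 * send a neigh event to signal that the neighbour is still in use.
 */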
473 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
474 {
475 struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
476 struct mlx5e_tc_flow *flow;
477 struct mlx5e_encap_entry *e;
478 struct mlx5_fc *counter;
479 struct neigh_table *tbl;
480 bool neigh_used = false;
481 struct neighbour *n;
482 u64 lastuse;
483
484 if (m_neigh->family == AF_INET)
485 tbl = &arp_tbl;
486 #if IS_ENABLED(CONFIG_IPV6)
487 else if (m_neigh->family == AF_INET6)
488 tbl = &nd_tbl;
489 #endif
490 else
491 return;
492
493 list_for_each_entry(e, &nhe->encap_list, encap_list) {
494 if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
495 continue;
496 list_for_each_entry(flow, &e->flows, encap) {
497 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
498 counter = mlx5_flow_rule_counter(flow->rule);
499 lastuse = mlx5_fc_query_lastuse(counter);
500 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
501 neigh_used = true;
502 break;
503 }
504 }
505 }
506 }
507
508 if (neigh_used) {
509 nhe->reported_lastuse = jiffies;
510
511 /* find the relevant neigh according to the cached device and
512 * dst ip pair
513 */
514 n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
515 if (!n) {
516 WARN(1, "The neighbour already freed\n");
517 return;
518 }
519
520 neigh_event_send(n, NULL);
521 neigh_release(n);
522 }
523 }
524
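/* Remove a flow from its encap entry; once the last flow is gone, detach the
 * entry from the representor, release the HW encap context and free it.
 */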
525 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
526 struct mlx5e_tc_flow *flow)
527 {
528 struct list_head *next = flow->encap.next;
529
530 list_del(&flow->encap);
531 if (list_empty(next)) {
532 struct mlx5e_encap_entry *e;
533
534 e = list_entry(next, struct mlx5e_encap_entry, flows);
535 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
536
537 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
538 mlx5_encap_dealloc(priv->mdev, e->encap_id);
539
540 hash_del_rcu(&e->encap_hlist);
541 kfree(e->encap_header);
542 kfree(e);
543 }
544 }
545
546 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
547 struct mlx5e_tc_flow *flow)
548 {
549 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
550 mlx5e_tc_del_fdb_flow(priv, flow);
551 else
552 mlx5e_tc_del_nic_flow(priv, flow);
553 }
554
555 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
556 struct tc_cls_flower_offload *f)
557 {
558 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
559 outer_headers);
560 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
561 outer_headers);
562 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
563 misc_parameters);
564 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
565 misc_parameters);
566
567 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
568 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
569
570 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
571 struct flow_dissector_key_keyid *key =
572 skb_flow_dissector_target(f->dissector,
573 FLOW_DISSECTOR_KEY_ENC_KEYID,
574 f->key);
575 struct flow_dissector_key_keyid *mask =
576 skb_flow_dissector_target(f->dissector,
577 FLOW_DISSECTOR_KEY_ENC_KEYID,
578 f->mask);
579 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
580 be32_to_cpu(mask->keyid));
581 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
582 be32_to_cpu(key->keyid));
583 }
584 }
585
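/* Build the outer-header match for a tunnel (VXLAN) decap flow: UDP dst port,
 * VNI and outer IP addresses, plus a forced match on our DMAC; IP fragments
 * are left to software.
 */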
586 static int parse_tunnel_attr(struct mlx5e_priv *priv,
587 struct mlx5_flow_spec *spec,
588 struct tc_cls_flower_offload *f)
589 {
590 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
591 outer_headers);
592 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
593 outer_headers);
594
595 struct flow_dissector_key_control *enc_control =
596 skb_flow_dissector_target(f->dissector,
597 FLOW_DISSECTOR_KEY_ENC_CONTROL,
598 f->key);
599
600 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
601 struct flow_dissector_key_ports *key =
602 skb_flow_dissector_target(f->dissector,
603 FLOW_DISSECTOR_KEY_ENC_PORTS,
604 f->key);
605 struct flow_dissector_key_ports *mask =
606 skb_flow_dissector_target(f->dissector,
607 FLOW_DISSECTOR_KEY_ENC_PORTS,
608 f->mask);
609 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
610 struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
611 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
612
613 /* Full udp dst port must be given */
614 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
615 goto vxlan_match_offload_err;
616
617 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
618 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
619 parse_vxlan_attr(spec, f);
620 else {
621 netdev_warn(priv->netdev,
622 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
623 return -EOPNOTSUPP;
624 }
625
626 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
627 udp_dport, ntohs(mask->dst));
628 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
629 udp_dport, ntohs(key->dst));
630
631 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
632 udp_sport, ntohs(mask->src));
633 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
634 udp_sport, ntohs(key->src));
635 } else { /* udp dst port must be given */
636 vxlan_match_offload_err:
637 netdev_warn(priv->netdev,
638 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
639 return -EOPNOTSUPP;
640 }
641
642 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
643 struct flow_dissector_key_ipv4_addrs *key =
644 skb_flow_dissector_target(f->dissector,
645 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
646 f->key);
647 struct flow_dissector_key_ipv4_addrs *mask =
648 skb_flow_dissector_target(f->dissector,
649 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
650 f->mask);
651 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
652 src_ipv4_src_ipv6.ipv4_layout.ipv4,
653 ntohl(mask->src));
654 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
655 src_ipv4_src_ipv6.ipv4_layout.ipv4,
656 ntohl(key->src));
657
658 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
659 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
660 ntohl(mask->dst));
661 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
662 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
663 ntohl(key->dst));
664
665 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
666 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
667 } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
668 struct flow_dissector_key_ipv6_addrs *key =
669 skb_flow_dissector_target(f->dissector,
670 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
671 f->key);
672 struct flow_dissector_key_ipv6_addrs *mask =
673 skb_flow_dissector_target(f->dissector,
674 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
675 f->mask);
676
677 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
678 src_ipv4_src_ipv6.ipv6_layout.ipv6),
679 &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
680 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
681 src_ipv4_src_ipv6.ipv6_layout.ipv6),
682 &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
683
684 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
685 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
686 &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
687 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
688 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
689 &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
690
691 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
692 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
693 }
694
695 /* Enforce DMAC when offloading incoming tunneled flows.
696 * Flow counters require a match on the DMAC.
697 */
698 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
699 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
700 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
701 dmac_47_16), priv->netdev->dev_addr);
702
703 /* let software handle IP fragments */
704 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
705 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
706
707 return 0;
708 }
709
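/* Translate the flower dissector keys into mlx5 flow_spec match criteria and
 * values, and report the minimal eswitch inline mode the match requires.
 */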
710 static int __parse_cls_flower(struct mlx5e_priv *priv,
711 struct mlx5_flow_spec *spec,
712 struct tc_cls_flower_offload *f,
713 u8 *min_inline)
714 {
715 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
716 outer_headers);
717 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
718 outer_headers);
719 u16 addr_type = 0;
720 u8 ip_proto = 0;
721
722 *min_inline = MLX5_INLINE_MODE_L2;
723
724 if (f->dissector->used_keys &
725 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
726 BIT(FLOW_DISSECTOR_KEY_BASIC) |
727 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
728 BIT(FLOW_DISSECTOR_KEY_VLAN) |
729 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
730 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
731 BIT(FLOW_DISSECTOR_KEY_PORTS) |
732 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
733 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
734 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
735 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
736 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
737 BIT(FLOW_DISSECTOR_KEY_TCP) |
738 BIT(FLOW_DISSECTOR_KEY_IP))) {
739 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
740 f->dissector->used_keys);
741 return -EOPNOTSUPP;
742 }
743
744 if ((dissector_uses_key(f->dissector,
745 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
746 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
747 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
748 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
749 struct flow_dissector_key_control *key =
750 skb_flow_dissector_target(f->dissector,
751 FLOW_DISSECTOR_KEY_ENC_CONTROL,
752 f->key);
753 switch (key->addr_type) {
754 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
755 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
756 if (parse_tunnel_attr(priv, spec, f))
757 return -EOPNOTSUPP;
758 break;
759 default:
760 return -EOPNOTSUPP;
761 }
762
763 /* In a decap flow, the header pointers should point to the inner
764  * headers; the outer headers were already set by parse_tunnel_attr
765  */
766 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
767 inner_headers);
768 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
769 inner_headers);
770 }
771
772 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
773 struct flow_dissector_key_control *key =
774 skb_flow_dissector_target(f->dissector,
775 FLOW_DISSECTOR_KEY_CONTROL,
776 f->key);
777
778 struct flow_dissector_key_control *mask =
779 skb_flow_dissector_target(f->dissector,
780 FLOW_DISSECTOR_KEY_CONTROL,
781 f->mask);
782 addr_type = key->addr_type;
783
784 /* the HW doesn't support frag first/later */
785 if (mask->flags & FLOW_DIS_FIRST_FRAG)
786 return -EOPNOTSUPP;
787
788 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
789 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
790 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
791 key->flags & FLOW_DIS_IS_FRAGMENT);
792
793 /* the HW doesn't need L3 inline to match on frag=no */
794 if (key->flags & FLOW_DIS_IS_FRAGMENT)
795 *min_inline = MLX5_INLINE_MODE_IP;
796 }
797 }
798
799 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
800 struct flow_dissector_key_basic *key =
801 skb_flow_dissector_target(f->dissector,
802 FLOW_DISSECTOR_KEY_BASIC,
803 f->key);
804 struct flow_dissector_key_basic *mask =
805 skb_flow_dissector_target(f->dissector,
806 FLOW_DISSECTOR_KEY_BASIC,
807 f->mask);
808 ip_proto = key->ip_proto;
809
810 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
811 ntohs(mask->n_proto));
812 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
813 ntohs(key->n_proto));
814
815 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
816 mask->ip_proto);
817 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
818 key->ip_proto);
819
820 if (mask->ip_proto)
821 *min_inline = MLX5_INLINE_MODE_IP;
822 }
823
824 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
825 struct flow_dissector_key_eth_addrs *key =
826 skb_flow_dissector_target(f->dissector,
827 FLOW_DISSECTOR_KEY_ETH_ADDRS,
828 f->key);
829 struct flow_dissector_key_eth_addrs *mask =
830 skb_flow_dissector_target(f->dissector,
831 FLOW_DISSECTOR_KEY_ETH_ADDRS,
832 f->mask);
833
834 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
835 dmac_47_16),
836 mask->dst);
837 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
838 dmac_47_16),
839 key->dst);
840
841 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
842 smac_47_16),
843 mask->src);
844 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
845 smac_47_16),
846 key->src);
847 }
848
849 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
850 struct flow_dissector_key_vlan *key =
851 skb_flow_dissector_target(f->dissector,
852 FLOW_DISSECTOR_KEY_VLAN,
853 f->key);
854 struct flow_dissector_key_vlan *mask =
855 skb_flow_dissector_target(f->dissector,
856 FLOW_DISSECTOR_KEY_VLAN,
857 f->mask);
858 if (mask->vlan_id || mask->vlan_priority) {
859 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
860 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
861
862 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
863 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
864
865 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
866 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
867 }
868 } else {
869 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
870 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
871 }
872
873 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
874 struct flow_dissector_key_ipv4_addrs *key =
875 skb_flow_dissector_target(f->dissector,
876 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
877 f->key);
878 struct flow_dissector_key_ipv4_addrs *mask =
879 skb_flow_dissector_target(f->dissector,
880 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
881 f->mask);
882
883 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
884 src_ipv4_src_ipv6.ipv4_layout.ipv4),
885 &mask->src, sizeof(mask->src));
886 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
887 src_ipv4_src_ipv6.ipv4_layout.ipv4),
888 &key->src, sizeof(key->src));
889 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
890 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
891 &mask->dst, sizeof(mask->dst));
892 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
893 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
894 &key->dst, sizeof(key->dst));
895
896 if (mask->src || mask->dst)
897 *min_inline = MLX5_INLINE_MODE_IP;
898 }
899
900 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
901 struct flow_dissector_key_ipv6_addrs *key =
902 skb_flow_dissector_target(f->dissector,
903 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
904 f->key);
905 struct flow_dissector_key_ipv6_addrs *mask =
906 skb_flow_dissector_target(f->dissector,
907 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
908 f->mask);
909
910 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
911 src_ipv4_src_ipv6.ipv6_layout.ipv6),
912 &mask->src, sizeof(mask->src));
913 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
914 src_ipv4_src_ipv6.ipv6_layout.ipv6),
915 &key->src, sizeof(key->src));
916
917 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
918 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
919 &mask->dst, sizeof(mask->dst));
920 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
921 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
922 &key->dst, sizeof(key->dst));
923
924 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
925 ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
926 *min_inline = MLX5_INLINE_MODE_IP;
927 }
928
929 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
930 struct flow_dissector_key_ip *key =
931 skb_flow_dissector_target(f->dissector,
932 FLOW_DISSECTOR_KEY_IP,
933 f->key);
934 struct flow_dissector_key_ip *mask =
935 skb_flow_dissector_target(f->dissector,
936 FLOW_DISSECTOR_KEY_IP,
937 f->mask);
938
939 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
940 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
941
942 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
943 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
944
945 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
946 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
947
948 if (mask->ttl &&
949 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
950 ft_field_support.outer_ipv4_ttl))
951 return -EOPNOTSUPP;
952
953 if (mask->tos || mask->ttl)
954 *min_inline = MLX5_INLINE_MODE_IP;
955 }
956
957 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
958 struct flow_dissector_key_ports *key =
959 skb_flow_dissector_target(f->dissector,
960 FLOW_DISSECTOR_KEY_PORTS,
961 f->key);
962 struct flow_dissector_key_ports *mask =
963 skb_flow_dissector_target(f->dissector,
964 FLOW_DISSECTOR_KEY_PORTS,
965 f->mask);
966 switch (ip_proto) {
967 case IPPROTO_TCP:
968 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
969 tcp_sport, ntohs(mask->src));
970 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
971 tcp_sport, ntohs(key->src));
972
973 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
974 tcp_dport, ntohs(mask->dst));
975 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
976 tcp_dport, ntohs(key->dst));
977 break;
978
979 case IPPROTO_UDP:
980 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
981 udp_sport, ntohs(mask->src));
982 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
983 udp_sport, ntohs(key->src));
984
985 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
986 udp_dport, ntohs(mask->dst));
987 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
988 udp_dport, ntohs(key->dst));
989 break;
990 default:
991 netdev_err(priv->netdev,
992 "Only UDP and TCP transport are supported\n");
993 return -EINVAL;
994 }
995
996 if (mask->src || mask->dst)
997 *min_inline = MLX5_INLINE_MODE_TCP_UDP;
998 }
999
1000 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1001 struct flow_dissector_key_tcp *key =
1002 skb_flow_dissector_target(f->dissector,
1003 FLOW_DISSECTOR_KEY_TCP,
1004 f->key);
1005 struct flow_dissector_key_tcp *mask =
1006 skb_flow_dissector_target(f->dissector,
1007 FLOW_DISSECTOR_KEY_TCP,
1008 f->mask);
1009
1010 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1011 ntohs(mask->flags));
1012 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1013 ntohs(key->flags));
1014
1015 if (mask->flags)
1016 *min_inline = MLX5_INLINE_MODE_TCP_UDP;
1017 }
1018
1019 return 0;
1020 }
1021
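/* Parse the flower match and, for eswitch flows on non-uplink reps, verify
 * that the configured eswitch inline mode is sufficient for the matched
 * headers.
 */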
1022 static int parse_cls_flower(struct mlx5e_priv *priv,
1023 struct mlx5e_tc_flow *flow,
1024 struct mlx5_flow_spec *spec,
1025 struct tc_cls_flower_offload *f)
1026 {
1027 struct mlx5_core_dev *dev = priv->mdev;
1028 struct mlx5_eswitch *esw = dev->priv.eswitch;
1029 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1030 struct mlx5_eswitch_rep *rep;
1031 u8 min_inline;
1032 int err;
1033
1034 err = __parse_cls_flower(priv, spec, f, &min_inline);
1035
1036 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1037 rep = rpriv->rep;
1038 if (rep->vport != FDB_UPLINK_VPORT &&
1039 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1040 esw->offloads.inline_mode < min_inline)) {
1041 netdev_warn(priv->netdev,
1042 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1043 min_inline, esw->offloads.inline_mode);
1044 return -EOPNOTSUPP;
1045 }
1046 }
1047
1048 return err;
1049 }
1050
1051 struct pedit_headers {
1052 struct ethhdr eth;
1053 struct iphdr ip4;
1054 struct ipv6hdr ip6;
1055 struct tcphdr tcp;
1056 struct udphdr udp;
1057 };
1058
1059 static int pedit_header_offsets[] = {
1060 [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1061 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1062 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1063 [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1064 [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1065 };
1066
1067 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1068
1069 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1070 struct pedit_headers *masks,
1071 struct pedit_headers *vals)
1072 {
1073 u32 *curr_pmask, *curr_pval;
1074
1075 if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1076 goto out_err;
1077
1078 curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1079 curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
1080
1081 if (*curr_pmask & mask) /* disallow acting twice on the same location */
1082 goto out_err;
1083
1084 *curr_pmask |= mask;
1085 *curr_pval |= (val & mask);
1086
1087 return 0;
1088
1089 out_err:
1090 return -EOPNOTSUPP;
1091 }
1092
1093 struct mlx5_fields {
1094 u8 field;
1095 u8 size;
1096 u32 offset;
1097 };
1098
1099 #define OFFLOAD(fw_field, size, field, off) \
1100 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1101
1102 static struct mlx5_fields fields[] = {
1103 OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1105 OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
1106 OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1107 OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
1108 OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
1109
1110 OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
1111 OFFLOAD(SIPV4, 4, ip4.saddr, 0),
1112 OFFLOAD(DIPV4, 4, ip4.daddr, 0),
1113
1114 OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1115 OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
1116 OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
1117 OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
1118 OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1119 OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
1120 OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
1121 OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
1122 OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
1123
1124 OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
1125 OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
1126 OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1127
1128 OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1129 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1130 };
1131
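/* Each entry above maps a HW modify-header field to its offset inside
 * struct pedit_headers. For example, a pedit SET on the TCP destination port
 * becomes one SET action on the TCP_DPORT field; offload_pedit_fields()
 * derives the HW offset/length from the first and last bits set in the mask.
 */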
1132 /* On input, parse_attr->max_mod_hdr_actions tells how many HW actions can
1133  * be parsed at most from the SW pedit action. On success,
1134  * parse_attr->num_mod_hdr_actions says how many HW actions were actually parsed.
1135  */
1136 static int offload_pedit_fields(struct pedit_headers *masks,
1137 struct pedit_headers *vals,
1138 struct mlx5e_tc_flow_parse_attr *parse_attr)
1139 {
1140 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
1141 int i, action_size, nactions, max_actions, first, last, next_z;
1142 void *s_masks_p, *a_masks_p, *vals_p;
1143 struct mlx5_fields *f;
1144 u8 cmd, field_bsize;
1145 u32 s_mask, a_mask;
1146 unsigned long mask;
1147 __be32 mask_be32;
1148 __be16 mask_be16;
1149 void *action;
1150
1151 set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1152 add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1153 set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1154 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1155
1156 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1157 action = parse_attr->mod_hdr_actions +
1158 parse_attr->num_mod_hdr_actions * action_size;
1159
1160 max_actions = parse_attr->max_mod_hdr_actions;
1161 nactions = parse_attr->num_mod_hdr_actions;
1162
1163 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1164 f = &fields[i];
1165 /* avoid seeing bits set from previous iterations */
1166 s_mask = 0;
1167 a_mask = 0;
1168
1169 s_masks_p = (void *)set_masks + f->offset;
1170 a_masks_p = (void *)add_masks + f->offset;
1171
1172 memcpy(&s_mask, s_masks_p, f->size);
1173 memcpy(&a_mask, a_masks_p, f->size);
1174
1175 if (!s_mask && !a_mask) /* nothing to offload here */
1176 continue;
1177
1178 if (s_mask && a_mask) {
1179 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1180 return -EOPNOTSUPP;
1181 }
1182
1183 if (nactions == max_actions) {
1184 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1185 return -EOPNOTSUPP;
1186 }
1187
1188 if (s_mask) {
1189 cmd = MLX5_ACTION_TYPE_SET;
1190 mask = s_mask;
1191 vals_p = (void *)set_vals + f->offset;
1192 /* clear to denote we consumed this field */
1193 memset(s_masks_p, 0, f->size);
1194 } else {
1195 cmd = MLX5_ACTION_TYPE_ADD;
1196 mask = a_mask;
1197 vals_p = (void *)add_vals + f->offset;
1198 /* clear to denote we consumed this field */
1199 memset(a_masks_p, 0, f->size);
1200 }
1201
1202 field_bsize = f->size * BITS_PER_BYTE;
1203
1204 if (field_bsize == 32) {
1205 mask_be32 = *(__be32 *)&mask;
1206 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1207 } else if (field_bsize == 16) {
1208 mask_be16 = *(__be16 *)&mask;
1209 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1210 }
1211
1212 first = find_first_bit(&mask, field_bsize);
1213 next_z = find_next_zero_bit(&mask, field_bsize, first);
1214 last = find_last_bit(&mask, field_bsize);
1215 if (first < next_z && next_z < last) {
1216 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
1217 mask);
1218 return -EOPNOTSUPP;
1219 }
1220
1221 MLX5_SET(set_action_in, action, action_type, cmd);
1222 MLX5_SET(set_action_in, action, field, f->field);
1223
1224 if (cmd == MLX5_ACTION_TYPE_SET) {
1225 MLX5_SET(set_action_in, action, offset, first);
1226 /* length is num of bits to be written, zero means length of 32 */
1227 MLX5_SET(set_action_in, action, length, (last - first + 1));
1228 }
1229
1230 if (field_bsize == 32)
1231 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
1232 else if (field_bsize == 16)
1233 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
1234 else if (field_bsize == 8)
1235 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
1236
1237 action += action_size;
1238 nactions++;
1239 }
1240
1241 parse_attr->num_mod_hdr_actions = nactions;
1242 return 0;
1243 }
1244
1245 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1246 const struct tc_action *a, int namespace,
1247 struct mlx5e_tc_flow_parse_attr *parse_attr)
1248 {
1249 int nkeys, action_size, max_actions;
1250
1251 nkeys = tcf_pedit_nkeys(a);
1252 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1253
1254 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1255 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1256 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1257 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
1258
1259 /* can get up to a crazy 16 HW actions per 32-bit pedit SW key */
1260 max_actions = min(max_actions, nkeys * 16);
1261
1262 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1263 if (!parse_attr->mod_hdr_actions)
1264 return -ENOMEM;
1265
1266 parse_attr->max_mod_hdr_actions = max_actions;
1267 return 0;
1268 }
1269
1270 static const struct pedit_headers zero_masks = {};
1271
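/* Walk the pedit keys, accumulating per-header-type set/add masks and values,
 * then convert them into HW modify-header actions. Any mask bits not consumed
 * by a fields[] entry belong to fields we can't rewrite, and the whole action
 * is rejected.
 */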
1272 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1273 const struct tc_action *a, int namespace,
1274 struct mlx5e_tc_flow_parse_attr *parse_attr)
1275 {
1276 struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1277 int nkeys, i, err = -EOPNOTSUPP;
1278 u32 mask, val, offset;
1279 u8 cmd, htype;
1280
1281 nkeys = tcf_pedit_nkeys(a);
1282
1283 memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1284 memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1285
1286 for (i = 0; i < nkeys; i++) {
1287 htype = tcf_pedit_htype(a, i);
1288 cmd = tcf_pedit_cmd(a, i);
1289 err = -EOPNOTSUPP; /* can't be all optimistic */
1290
1291 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
1292 printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
1293 goto out_err;
1294 }
1295
1296 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
1297 printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
1298 goto out_err;
1299 }
1300
1301 mask = tcf_pedit_mask(a, i);
1302 val = tcf_pedit_val(a, i);
1303 offset = tcf_pedit_offset(a, i);
1304
1305 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
1306 if (err)
1307 goto out_err;
1308 }
1309
1310 if (!parse_attr->mod_hdr_actions) {
1311 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1312 if (err)
1313 goto out_err;
1314 }
1315
1316 err = offload_pedit_fields(masks, vals, parse_attr);
1317 if (err < 0)
1318 goto out_dealloc_parsed_actions;
1319
1320 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
1321 cmd_masks = &masks[cmd];
1322 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
1323 printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
1324 cmd);
1325 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
1326 16, 1, cmd_masks, sizeof(zero_masks), true);
1327 err = -EOPNOTSUPP;
1328 goto out_dealloc_parsed_actions;
1329 }
1330 }
1331
1332 return 0;
1333
1334 out_dealloc_parsed_actions:
1335 kfree(parse_attr->mod_hdr_actions);
1336 out_err:
1337 return err;
1338 }
1339
1340 static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1341 {
1342 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1343 TCA_CSUM_UPDATE_FLAG_UDP;
1344
1345 /* The HW recalcs checksums only if re-writing headers */
1346 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1347 netdev_warn(priv->netdev,
1348 "TC csum action is only offloaded with pedit\n");
1349 return false;
1350 }
1351
1352 if (update_flags & ~prot_flags) {
1353 netdev_warn(priv->netdev,
1354 "can't offload TC csum action for some header/s - flags %#x\n",
1355 update_flags);
1356 return false;
1357 }
1358
1359 return true;
1360 }
1361
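/* Rewriting IPv4/IPv6 header fields is only offloaded when the matched
 * ip_proto is TCP, UDP or ICMP; non-IP flows only have their MACs rewritten,
 * which is always supported.
 */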
1362 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1363 struct tcf_exts *exts)
1364 {
1365 const struct tc_action *a;
1366 bool modify_ip_header;
1367 LIST_HEAD(actions);
1368 u8 htype, ip_proto;
1369 void *headers_v;
1370 u16 ethertype;
1371 int nkeys, i;
1372
1373 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1374 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1375
1376 /* for non-IP we only re-write MACs, so we're okay */
1377 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
1378 goto out_ok;
1379
1380 modify_ip_header = false;
1381 tcf_exts_to_list(exts, &actions);
1382 list_for_each_entry(a, &actions, list) {
1383 if (!is_tcf_pedit(a))
1384 continue;
1385
1386 nkeys = tcf_pedit_nkeys(a);
1387 for (i = 0; i < nkeys; i++) {
1388 htype = tcf_pedit_htype(a, i);
1389 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1390 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1391 modify_ip_header = true;
1392 break;
1393 }
1394 }
1395 }
1396
1397 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1398 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
1399 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
1400 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1401 return false;
1402 }
1403
1404 out_ok:
1405 return true;
1406 }
1407
1408 static bool actions_match_supported(struct mlx5e_priv *priv,
1409 struct tcf_exts *exts,
1410 struct mlx5e_tc_flow_parse_attr *parse_attr,
1411 struct mlx5e_tc_flow *flow)
1412 {
1413 u32 actions;
1414
1415 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1416 actions = flow->esw_attr->action;
1417 else
1418 actions = flow->nic_attr->action;
1419
1420 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1421 return modify_header_match_supported(&parse_attr->spec, exts);
1422
1423 return true;
1424 }
1425
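/* Translate the TC actions of a NIC flow (drop, pedit, csum, skbedit mark)
 * into flow-table actions and a flow tag.
 */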
1426 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1427 struct mlx5e_tc_flow_parse_attr *parse_attr,
1428 struct mlx5e_tc_flow *flow)
1429 {
1430 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1431 const struct tc_action *a;
1432 LIST_HEAD(actions);
1433 int err;
1434
1435 if (!tcf_exts_has_actions(exts))
1436 return -EINVAL;
1437
1438 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
1439 attr->action = 0;
1440
1441 tcf_exts_to_list(exts, &actions);
1442 list_for_each_entry(a, &actions, list) {
1443 if (is_tcf_gact_shot(a)) {
1444 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1445 if (MLX5_CAP_FLOWTABLE(priv->mdev,
1446 flow_table_properties_nic_receive.flow_counter))
1447 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1448 continue;
1449 }
1450
1451 if (is_tcf_pedit(a)) {
1452 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
1453 parse_attr);
1454 if (err)
1455 return err;
1456
1457 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1458 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1459 continue;
1460 }
1461
1462 if (is_tcf_csum(a)) {
1463 if (csum_offload_supported(priv, attr->action,
1464 tcf_csum_update_flags(a)))
1465 continue;
1466
1467 return -EOPNOTSUPP;
1468 }
1469
1470 if (is_tcf_skbedit_mark(a)) {
1471 u32 mark = tcf_skbedit_mark(a);
1472
1473 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
1474 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
1475 mark);
1476 return -EINVAL;
1477 }
1478
1479 attr->flow_tag = mark;
1480 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1481 continue;
1482 }
1483
1484 return -EINVAL;
1485 }
1486
1487 if (!actions_match_supported(priv, exts, parse_attr, flow))
1488 return -EOPNOTSUPP;
1489
1490 return 0;
1491 }
1492
1493 static inline int cmp_encap_info(struct ip_tunnel_key *a,
1494 struct ip_tunnel_key *b)
1495 {
1496 return memcmp(a, b, sizeof(*a));
1497 }
1498
1499 static inline int hash_encap_info(struct ip_tunnel_key *key)
1500 {
1501 return jhash(key, sizeof(*key), 0);
1502 }
1503
1504 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
1505 struct net_device *mirred_dev,
1506 struct net_device **out_dev,
1507 struct flowi4 *fl4,
1508 struct neighbour **out_n,
1509 int *out_ttl)
1510 {
1511 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1512 struct rtable *rt;
1513 struct neighbour *n = NULL;
1514
1515 #if IS_ENABLED(CONFIG_INET)
1516 int ret;
1517
1518 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
1519 ret = PTR_ERR_OR_ZERO(rt);
1520 if (ret)
1521 return ret;
1522 #else
1523 return -EOPNOTSUPP;
1524 #endif
1525 /* if the egress device isn't on the same HW e-switch, we use the uplink */
1526 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
1527 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1528 else
1529 *out_dev = rt->dst.dev;
1530
1531 *out_ttl = ip4_dst_hoplimit(&rt->dst);
1532 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
1533 ip_rt_put(rt);
1534 if (!n)
1535 return -ENOMEM;
1536
1537 *out_n = n;
1538 return 0;
1539 }
1540
1541 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
1542 struct net_device *mirred_dev,
1543 struct net_device **out_dev,
1544 struct flowi6 *fl6,
1545 struct neighbour **out_n,
1546 int *out_ttl)
1547 {
1548 struct neighbour *n = NULL;
1549 struct dst_entry *dst;
1550
1551 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
1552 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1553 int ret;
1554
1555 ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
1556 fl6);
1557 if (ret < 0)
1558 return ret;
1559
1560 *out_ttl = ip6_dst_hoplimit(dst);
1561
1562 /* if the egress device isn't on the same HW e-switch, we use the uplink */
1563 if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
1564 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1565 else
1566 *out_dev = dst->dev;
1567 #else
1568 return -EOPNOTSUPP;
1569 #endif
1570
1571 n = dst_neigh_lookup(dst, &fl6->daddr);
1572 dst_release(dst);
1573 if (!n)
1574 return -ENOMEM;
1575
1576 *out_n = n;
1577 return 0;
1578 }
1579
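/* Write a VXLAN-over-IPv4 encapsulation header (Ethernet + IPv4 + UDP +
 * VXLAN) into buf.
 */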
1580 static void gen_vxlan_header_ipv4(struct net_device *out_dev,
1581 char buf[], int encap_size,
1582 unsigned char h_dest[ETH_ALEN],
1583 int ttl,
1584 __be32 daddr,
1585 __be32 saddr,
1586 __be16 udp_dst_port,
1587 __be32 vx_vni)
1588 {
1589 struct ethhdr *eth = (struct ethhdr *)buf;
1590 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
1591 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
1592 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1593
1594 memset(buf, 0, encap_size);
1595
1596 ether_addr_copy(eth->h_dest, h_dest);
1597 ether_addr_copy(eth->h_source, out_dev->dev_addr);
1598 eth->h_proto = htons(ETH_P_IP);
1599
1600 ip->daddr = daddr;
1601 ip->saddr = saddr;
1602
1603 ip->ttl = ttl;
1604 ip->protocol = IPPROTO_UDP;
1605 ip->version = 0x4;
1606 ip->ihl = 0x5;
1607
1608 udp->dest = udp_dst_port;
1609 vxh->vx_flags = VXLAN_HF_VNI;
1610 vxh->vx_vni = vxlan_vni_field(vx_vni);
1611 }
1612
1613 static void gen_vxlan_header_ipv6(struct net_device *out_dev,
1614 char buf[], int encap_size,
1615 unsigned char h_dest[ETH_ALEN],
1616 int ttl,
1617 struct in6_addr *daddr,
1618 struct in6_addr *saddr,
1619 __be16 udp_dst_port,
1620 __be32 vx_vni)
1621 {
1622 struct ethhdr *eth = (struct ethhdr *)buf;
1623 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
1624 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
1625 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1626
1627 memset(buf, 0, encap_size);
1628
1629 ether_addr_copy(eth->h_dest, h_dest);
1630 ether_addr_copy(eth->h_source, out_dev->dev_addr);
1631 eth->h_proto = htons(ETH_P_IPV6);
1632
1633 ip6_flow_hdr(ip6h, 0, 0);
1634 /* the HW fills in the IPv6 payload length */
1635 ip6h->nexthdr = IPPROTO_UDP;
1636 ip6h->hop_limit = ttl;
1637 ip6h->daddr = *daddr;
1638 ip6h->saddr = *saddr;
1639
1640 udp->dest = udp_dst_port;
1641 vxh->vx_flags = VXLAN_HF_VNI;
1642 vxh->vx_vni = vxlan_vni_field(vx_vni);
1643 }
1644
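/* Resolve the IPv4 tunnel route and neighbour, build the VXLAN encap header
 * and, if the neighbour is already valid, program it into HW; otherwise kick
 * off neighbour resolution and return -EAGAIN so the flow is offloaded later
 * from the neigh update path.
 */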
1645 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1646 struct net_device *mirred_dev,
1647 struct mlx5e_encap_entry *e)
1648 {
1649 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1650 int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
1651 struct ip_tunnel_key *tun_key = &e->tun_info.key;
1652 struct net_device *out_dev;
1653 struct neighbour *n = NULL;
1654 struct flowi4 fl4 = {};
1655 char *encap_header;
1656 int ttl, err;
1657 u8 nud_state;
1658
1659 if (max_encap_size < ipv4_encap_size) {
1660 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1661 ipv4_encap_size, max_encap_size);
1662 return -EOPNOTSUPP;
1663 }
1664
1665 encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
1666 if (!encap_header)
1667 return -ENOMEM;
1668
1669 switch (e->tunnel_type) {
1670 case MLX5_HEADER_TYPE_VXLAN:
1671 fl4.flowi4_proto = IPPROTO_UDP;
1672 fl4.fl4_dport = tun_key->tp_dst;
1673 break;
1674 default:
1675 err = -EOPNOTSUPP;
1676 goto free_encap;
1677 }
1678 fl4.flowi4_tos = tun_key->tos;
1679 fl4.daddr = tun_key->u.ipv4.dst;
1680 fl4.saddr = tun_key->u.ipv4.src;
1681
1682 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
1683 &fl4, &n, &ttl);
1684 if (err)
1685 goto free_encap;
1686
1687 /* used by mlx5e_detach_encap to look up the corresponding entry
1688  * in the neigh hash table when a user deletes a rule
1689  */
1690 e->m_neigh.dev = n->dev;
1691 e->m_neigh.family = n->ops->family;
1692 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1693 e->out_dev = out_dev;
1694
1695 /* It's important to add the neigh to the hash table before checking
1696  * its validity state. That way, if we get a notification about the
1697  * neigh changing its validity state, we will find the relevant neigh
1698  * in the hash.
1699  */
1700 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1701 if (err)
1702 goto free_encap;
1703
1704 read_lock_bh(&n->lock);
1705 nud_state = n->nud_state;
1706 ether_addr_copy(e->h_dest, n->ha);
1707 read_unlock_bh(&n->lock);
1708
1709 switch (e->tunnel_type) {
1710 case MLX5_HEADER_TYPE_VXLAN:
1711 gen_vxlan_header_ipv4(out_dev, encap_header,
1712 ipv4_encap_size, e->h_dest, ttl,
1713 fl4.daddr,
1714 fl4.saddr, tun_key->tp_dst,
1715 tunnel_id_to_key32(tun_key->tun_id));
1716 break;
1717 default:
1718 err = -EOPNOTSUPP;
1719 goto destroy_neigh_entry;
1720 }
1721 e->encap_size = ipv4_encap_size;
1722 e->encap_header = encap_header;
1723
1724 if (!(nud_state & NUD_VALID)) {
1725 neigh_event_send(n, NULL);
1726 err = -EAGAIN;
1727 goto out;
1728 }
1729
1730 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1731 ipv4_encap_size, encap_header, &e->encap_id);
1732 if (err)
1733 goto destroy_neigh_entry;
1734
1735 e->flags |= MLX5_ENCAP_ENTRY_VALID;
1736 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1737 neigh_release(n);
1738 return err;
1739
1740 destroy_neigh_entry:
1741 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1742 free_encap:
1743 kfree(encap_header);
1744 out:
1745 if (n)
1746 neigh_release(n);
1747 return err;
1748 }
1749
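/* IPv6 counterpart of mlx5e_create_encap_header_ipv4(): same flow, using a
 * flowi6 route lookup and an IPv6 VXLAN header.
 */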
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh hash
	 * table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state, so that if a notification arrives when the
	 * neigh changes its validity state, we find the relevant neigh in
	 * the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

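/* Find or create the encap entry shared by flows with the same tunnel key.
 * Only VXLAN with a known (offloaded) UDP destination port is supported.
 * Returns -EAGAIN if the encap entry is not valid yet (neighbour unresolved);
 * the flow is still linked to the entry so it can be offloaded once the
 * encap becomes valid.
 */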
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}

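/* Translate the tc actions of an eswitch (FDB) flow into mlx5_esw_flow_attr:
 * drop, header rewrite (pedit/csum), redirect to another port on the same
 * eswitch, tunnel encap/decap and VLAN push/pop.
 */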
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				parse_attr->mirred_ifindex = ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
				    tcf_vlan_push_prio(a))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return err;
}

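/* Offload a new tc flower rule: parse the match into a flow spec, parse the
 * actions for either the NIC or the eswitch (FDB) path depending on the
 * eswitch mode, add the rule to HW, and track the flow in the tc hash table
 * keyed by the flower cookie.
 */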
int mlx5e_configure_flower(struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		if (err != -EAGAIN)
			goto err_free;
	}

	if (err != -EAGAIN)
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err) {
		mlx5e_tc_del_flow(priv, flow);
		kfree(flow);
	}

	return err;

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

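/* Remove an offloaded flower rule: look up the flow by cookie, delete the HW
 * rule and free the flow.
 */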
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

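/* Report the cached HW counter values (bytes, packets, last use) of an
 * offloaded flower rule back to tc.
 */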
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}

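/* Flows are hashed by the flower cookie. */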
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

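/* Initialize the per-priv tc state: the mod_hdr hash table and the flow
 * hash table.
 */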
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	hash_init(tc->mod_hdr_tbl);

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

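/* Tear down all offloaded flows and, if it was created, the NIC tc flow
 * table.
 */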
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}
