1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2021 Corigine, Inc. */
3
4 #include "conntrack.h"
5 #include "../nfp_port.h"
6
7 const struct rhashtable_params nfp_tc_ct_merge_params = {
8 .head_offset = offsetof(struct nfp_fl_ct_tc_merge,
9 hash_node),
10 .key_len = sizeof(unsigned long) * 2,
11 .key_offset = offsetof(struct nfp_fl_ct_tc_merge, cookie),
12 .automatic_shrinking = true,
13 };
14
15 const struct rhashtable_params nfp_nft_ct_merge_params = {
16 .head_offset = offsetof(struct nfp_fl_nft_tc_merge,
17 hash_node),
18 .key_len = sizeof(unsigned long) * 3,
19 .key_offset = offsetof(struct nfp_fl_nft_tc_merge, cookie),
20 .automatic_shrinking = true,
21 };
22
23 static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
24 enum flow_action_id act_id);
25
/**
 * get_hashentry() - Wrapper around hashtable lookup.
 * @ht: hashtable where entry could be found
 * @key: key to lookup
 * @params: hashtable params
 * @size: size of entry to allocate if not in table
 *
 * Returns an entry from a hashtable. If the entry does not exist
 * yet, allocate the memory for it and return the new entry.
 */
static void *get_hashentry(struct rhashtable *ht, void *key,
	const struct rhashtable_params params, size_t size)
38 {
39 void *result;
40
41 result = rhashtable_lookup_fast(ht, key, params);
42
43 if (result)
44 return result;
45
46 result = kzalloc(size, GFP_KERNEL);
47 if (!result)
48 return ERR_PTR(-ENOMEM);
49
50 return result;
51 }
52
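/* A flow counts as pre-ct if it carries a FLOW_ACTION_CT entry with no
 * ct action flags set, i.e. a plain ct action rather than commit/clear.
 */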
bool is_pre_ct_flow(struct flow_cls_offload *flow)
54 {
55 struct flow_action_entry *act;
56 int i;
57
58 flow_action_for_each(i, act, &flow->rule->action) {
59 if (act->id == FLOW_ACTION_CT && !act->ct.action)
60 return true;
61 }
62 return false;
63 }
64
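/* A flow counts as post-ct if it matches on the CT key with the
 * 'established' ct_state bit set.
 */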
bool is_post_ct_flow(struct flow_cls_offload *flow)
66 {
67 struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
68 struct flow_dissector *dissector = rule->match.dissector;
69 struct flow_match_ct ct;
70
71 if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
72 flow_rule_match_ct(rule, &ct);
73 if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
74 return true;
75 }
76 return false;
77 }
78
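/* Two flow entries may only be merged if, for every dissector key present
 * in both rules, the values under the masks do not conflict. Returns 0 if
 * the rules are compatible, -EINVAL otherwise.
 */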
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
	struct nfp_fl_ct_flow_entry *entry2)
81 {
82 unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
83 entry2->rule->match.dissector->used_keys;
84 bool out;
85
/* Check the overlapped fields one by one; the unmasked parts
 * must not conflict with each other.
 */
89 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
90 struct flow_match_control match1, match2;
91
92 flow_rule_match_control(entry1->rule, &match1);
93 flow_rule_match_control(entry2->rule, &match2);
94 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
95 if (out)
96 goto check_failed;
97 }
98
99 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
100 struct flow_match_basic match1, match2;
101
102 flow_rule_match_basic(entry1->rule, &match1);
103 flow_rule_match_basic(entry2->rule, &match2);
104 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
105 if (out)
106 goto check_failed;
107 }
108
109 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
110 struct flow_match_ipv4_addrs match1, match2;
111
112 flow_rule_match_ipv4_addrs(entry1->rule, &match1);
113 flow_rule_match_ipv4_addrs(entry2->rule, &match2);
114 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
115 if (out)
116 goto check_failed;
117 }
118
119 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
120 struct flow_match_ipv6_addrs match1, match2;
121
122 flow_rule_match_ipv6_addrs(entry1->rule, &match1);
123 flow_rule_match_ipv6_addrs(entry2->rule, &match2);
124 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
125 if (out)
126 goto check_failed;
127 }
128
129 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
130 struct flow_match_ports match1, match2;
131
132 flow_rule_match_ports(entry1->rule, &match1);
133 flow_rule_match_ports(entry2->rule, &match2);
134 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
135 if (out)
136 goto check_failed;
137 }
138
139 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
140 struct flow_match_eth_addrs match1, match2;
141
142 flow_rule_match_eth_addrs(entry1->rule, &match1);
143 flow_rule_match_eth_addrs(entry2->rule, &match2);
144 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
145 if (out)
146 goto check_failed;
147 }
148
149 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
150 struct flow_match_vlan match1, match2;
151
152 flow_rule_match_vlan(entry1->rule, &match1);
153 flow_rule_match_vlan(entry2->rule, &match2);
154 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
155 if (out)
156 goto check_failed;
157 }
158
159 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
160 struct flow_match_mpls match1, match2;
161
162 flow_rule_match_mpls(entry1->rule, &match1);
163 flow_rule_match_mpls(entry2->rule, &match2);
164 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
165 if (out)
166 goto check_failed;
167 }
168
169 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
170 struct flow_match_tcp match1, match2;
171
172 flow_rule_match_tcp(entry1->rule, &match1);
173 flow_rule_match_tcp(entry2->rule, &match2);
174 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
175 if (out)
176 goto check_failed;
177 }
178
179 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
180 struct flow_match_ip match1, match2;
181
182 flow_rule_match_ip(entry1->rule, &match1);
183 flow_rule_match_ip(entry2->rule, &match2);
184 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
185 if (out)
186 goto check_failed;
187 }
188
189 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
190 struct flow_match_enc_keyid match1, match2;
191
192 flow_rule_match_enc_keyid(entry1->rule, &match1);
193 flow_rule_match_enc_keyid(entry2->rule, &match2);
194 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
195 if (out)
196 goto check_failed;
197 }
198
199 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
200 struct flow_match_ipv4_addrs match1, match2;
201
202 flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
203 flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
204 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
205 if (out)
206 goto check_failed;
207 }
208
209 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
210 struct flow_match_ipv6_addrs match1, match2;
211
212 flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
213 flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
214 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
215 if (out)
216 goto check_failed;
217 }
218
219 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
220 struct flow_match_control match1, match2;
221
222 flow_rule_match_enc_control(entry1->rule, &match1);
223 flow_rule_match_enc_control(entry2->rule, &match2);
224 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
225 if (out)
226 goto check_failed;
227 }
228
229 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
230 struct flow_match_ip match1, match2;
231
232 flow_rule_match_enc_ip(entry1->rule, &match1);
233 flow_rule_match_enc_ip(entry2->rule, &match2);
234 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
235 if (out)
236 goto check_failed;
237 }
238
239 if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
240 struct flow_match_enc_opts match1, match2;
241
242 flow_rule_match_enc_opts(entry1->rule, &match1);
243 flow_rule_match_enc_opts(entry2->rule, &match2);
244 COMPARE_UNMASKED_FIELDS(match1, match2, &out);
245 if (out)
246 goto check_failed;
247 }
248
249 return 0;
250
251 check_failed:
252 return -EINVAL;
253 }
254
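/* Return -EOPNOTSUPP when a pkt_edit (mangle) action would rewrite a
 * header field that @rule also matches on, as such a combination cannot
 * be merged.
 */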
static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
	struct flow_rule *rule)
257 {
258 enum flow_action_mangle_base htype = a_in->mangle.htype;
259 u32 offset = a_in->mangle.offset;
260
261 switch (htype) {
262 case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
263 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
264 return -EOPNOTSUPP;
265 break;
266 case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
267 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
268 struct flow_match_ip match;
269
270 flow_rule_match_ip(rule, &match);
271 if (offset == offsetof(struct iphdr, ttl) &&
272 match.mask->ttl)
273 return -EOPNOTSUPP;
274 if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
275 match.mask->tos)
276 return -EOPNOTSUPP;
277 }
278 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
279 struct flow_match_ipv4_addrs match;
280
281 flow_rule_match_ipv4_addrs(rule, &match);
282 if (offset == offsetof(struct iphdr, saddr) &&
283 match.mask->src)
284 return -EOPNOTSUPP;
285 if (offset == offsetof(struct iphdr, daddr) &&
286 match.mask->dst)
287 return -EOPNOTSUPP;
288 }
289 break;
290 case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
291 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
292 struct flow_match_ip match;
293
294 flow_rule_match_ip(rule, &match);
295 if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
296 match.mask->ttl)
297 return -EOPNOTSUPP;
298 /* for ipv6, tos and flow_lbl are in the same word */
299 if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
300 match.mask->tos)
301 return -EOPNOTSUPP;
302 }
303 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
304 struct flow_match_ipv6_addrs match;
305
306 flow_rule_match_ipv6_addrs(rule, &match);
307 if (offset >= offsetof(struct ipv6hdr, saddr) &&
308 offset < offsetof(struct ipv6hdr, daddr) &&
309 memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
310 return -EOPNOTSUPP;
311 if (offset >= offsetof(struct ipv6hdr, daddr) &&
312 offset < sizeof(struct ipv6hdr) &&
313 memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
314 return -EOPNOTSUPP;
315 }
316 break;
317 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
318 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
/* currently only modifying ports is supported */
320 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
321 return -EOPNOTSUPP;
322 break;
323 default:
324 break;
325 }
326 return 0;
327 }
328
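/* Check the pre-ct and nft action lists for actions that prevent a merge:
 * mangle actions that clash with matches in the other rules, and any
 * VLAN or MPLS push/pop/mangle actions, which are not supported here.
 */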
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
	struct nfp_fl_ct_flow_entry *post_ct_entry,
	struct nfp_fl_ct_flow_entry *nft_entry)
332 {
333 struct flow_action_entry *act;
334 int err, i;
335
336 /* Check for pre_ct->action conflicts */
337 flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
338 switch (act->id) {
339 case FLOW_ACTION_MANGLE:
340 err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
341 if (err)
342 return err;
343 err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
344 if (err)
345 return err;
346 break;
347 case FLOW_ACTION_VLAN_PUSH:
348 case FLOW_ACTION_VLAN_POP:
349 case FLOW_ACTION_VLAN_MANGLE:
350 case FLOW_ACTION_MPLS_PUSH:
351 case FLOW_ACTION_MPLS_POP:
352 case FLOW_ACTION_MPLS_MANGLE:
353 return -EOPNOTSUPP;
354 default:
355 break;
356 }
357 }
358
359 /* Check for nft->action conflicts */
360 flow_action_for_each(i, act, &nft_entry->rule->action) {
361 switch (act->id) {
362 case FLOW_ACTION_MANGLE:
363 err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
364 if (err)
365 return err;
366 break;
367 case FLOW_ACTION_VLAN_PUSH:
368 case FLOW_ACTION_VLAN_POP:
369 case FLOW_ACTION_VLAN_MANGLE:
370 case FLOW_ACTION_MPLS_PUSH:
371 case FLOW_ACTION_MPLS_POP:
372 case FLOW_ACTION_MPLS_MANGLE:
373 return -EOPNOTSUPP;
374 default:
375 break;
376 }
377 }
378 return 0;
379 }
380
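/* Verify that the ct_mark and ct_labels matched by the post-ct rule are
 * consistent with the metadata set by the nft entry's CT_METADATA action.
 */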
static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
	struct nfp_fl_ct_flow_entry *nft_entry)
383 {
384 struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
385 struct flow_action_entry *ct_met;
386 struct flow_match_ct ct;
387 int i;
388
389 ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
390 if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
391 u32 *act_lbl;
392
393 act_lbl = ct_met->ct_metadata.labels;
394 flow_rule_match_ct(post_ct_entry->rule, &ct);
395 for (i = 0; i < 4; i++) {
396 if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
397 (act_lbl[i] & ct.mask->ct_labels[i]))
398 return -EINVAL;
399 }
400
401 if ((ct.key->ct_mark & ct.mask->ct_mark) ^
402 (ct_met->ct_metadata.mark & ct.mask->ct_mark))
403 return -EINVAL;
404
405 return 0;
406 }
407
408 return -EINVAL;
409 }
410
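/* Walk the key layer bitmap, record the byte offset of each present layer
 * in @map and return the total size of the resulting match key.
 */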
411 static int
nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
413 {
414 int key_size;
415
416 /* This field must always be present */
417 key_size = sizeof(struct nfp_flower_meta_tci);
418 map[FLOW_PAY_META_TCI] = 0;
419
420 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
421 map[FLOW_PAY_EXT_META] = key_size;
422 key_size += sizeof(struct nfp_flower_ext_meta);
423 }
424 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
425 map[FLOW_PAY_INPORT] = key_size;
426 key_size += sizeof(struct nfp_flower_in_port);
427 }
428 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
429 map[FLOW_PAY_MAC_MPLS] = key_size;
430 key_size += sizeof(struct nfp_flower_mac_mpls);
431 }
432 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
433 map[FLOW_PAY_L4] = key_size;
434 key_size += sizeof(struct nfp_flower_tp_ports);
435 }
436 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
437 map[FLOW_PAY_IPV4] = key_size;
438 key_size += sizeof(struct nfp_flower_ipv4);
439 }
440 if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
441 map[FLOW_PAY_IPV6] = key_size;
442 key_size += sizeof(struct nfp_flower_ipv6);
443 }
444
445 if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
446 map[FLOW_PAY_QINQ] = key_size;
447 key_size += sizeof(struct nfp_flower_vlan);
448 }
449
450 if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
451 map[FLOW_PAY_GRE] = key_size;
452 if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
453 key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
454 else
455 key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
456 }
457
458 if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
459 (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
460 map[FLOW_PAY_UDP_TUN] = key_size;
461 if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
462 key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
463 else
464 key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
465 }
466
467 if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
468 map[FLOW_PAY_GENEVE_OPT] = key_size;
469 key_size += sizeof(struct nfp_flower_geneve_options);
470 }
471
472 return key_size;
473 }
474
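/* Concatenate the action lists of the pre-ct, nft and post-ct rules into a
 * single rule, dropping CT, GOTO and CT_METADATA actions, and compile the
 * result into @flow_pay.
 */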
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
	struct nfp_flower_priv *priv,
	struct net_device *netdev,
	struct nfp_fl_payload *flow_pay)
479 {
480 struct flow_action_entry *a_in;
481 int i, j, num_actions, id;
482 struct flow_rule *a_rule;
483 int err = 0, offset = 0;
484
485 num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
486 rules[CT_TYPE_NFT]->action.num_entries +
487 rules[CT_TYPE_POST_CT]->action.num_entries;
488
489 a_rule = flow_rule_alloc(num_actions);
490 if (!a_rule)
491 return -ENOMEM;
492
493 /* Actions need a BASIC dissector. */
494 a_rule->match = rules[CT_TYPE_PRE_CT]->match;
495
496 /* Copy actions */
497 for (j = 0; j < _CT_TYPE_MAX; j++) {
498 if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
499 struct flow_match_basic match;
500
/* ip_proto is the only field needed by the later compile_action step,
 * where it is used to set the correct checksum flags. It does not really
 * matter which input rule's ip_proto field we take, as the earlier merge
 * checks ensure they do not conflict. We do not know which of the subflows
 * has ip_proto filled in, so iterate through the subflows and assign the
 * proper one to a_rule.
 */
508 flow_rule_match_basic(rules[j], &match);
509 if (match.mask->ip_proto)
510 a_rule->match = rules[j]->match;
511 }
512
513 for (i = 0; i < rules[j]->action.num_entries; i++) {
514 a_in = &rules[j]->action.entries[i];
515 id = a_in->id;
516
/* Ignore CT-related actions, as these have already been handled by
 * the previous checks and we do not send any CT actions to the
 * firmware.
 */
521 switch (id) {
522 case FLOW_ACTION_CT:
523 case FLOW_ACTION_GOTO:
524 case FLOW_ACTION_CT_METADATA:
525 continue;
526 default:
527 memcpy(&a_rule->action.entries[offset++],
528 a_in, sizeof(struct flow_action_entry));
529 break;
530 }
531 }
532 }
533
534 /* Some actions would have been ignored, so update the num_entries field */
535 a_rule->action.num_entries = offset;
536 err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
537 kfree(a_rule);
538
539 return err;
540 }
541
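/* Build the merged match key/mask from the three parent rules, compile the
 * merged action list and offload the resulting flow to the firmware.
 */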
static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
543 {
544 enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
545 struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
546 struct nfp_fl_key_ls key_layer, tmp_layer;
547 struct nfp_flower_priv *priv = zt->priv;
548 u16 key_map[_FLOW_PAY_LAYERS_MAX];
549 struct nfp_fl_payload *flow_pay;
550
551 struct flow_rule *rules[_CT_TYPE_MAX];
552 u8 *key, *msk, *kdata, *mdata;
553 struct nfp_port *port = NULL;
554 struct net_device *netdev;
555 bool qinq_sup;
556 u32 port_id;
557 u16 offset;
558 int i, err;
559
560 netdev = m_entry->netdev;
561 qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
562
563 rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
564 rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
565 rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
566
567 memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
568 memset(&key_map, 0, sizeof(key_map));
569
570 /* Calculate the resultant key layer and size for offload */
571 for (i = 0; i < _CT_TYPE_MAX; i++) {
572 err = nfp_flower_calculate_key_layers(priv->app,
573 m_entry->netdev,
574 &tmp_layer, rules[i],
575 &tun_type, NULL);
576 if (err)
577 return err;
578
579 key_layer.key_layer |= tmp_layer.key_layer;
580 key_layer.key_layer_two |= tmp_layer.key_layer_two;
581 }
582 key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);
583
584 flow_pay = nfp_flower_allocate_new(&key_layer);
585 if (!flow_pay)
586 return -ENOMEM;
587
588 memset(flow_pay->unmasked_data, 0, key_layer.key_size);
589 memset(flow_pay->mask_data, 0, key_layer.key_size);
590
591 kdata = flow_pay->unmasked_data;
592 mdata = flow_pay->mask_data;
593
594 offset = key_map[FLOW_PAY_META_TCI];
595 key = kdata + offset;
596 msk = mdata + offset;
597 nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
598 (struct nfp_flower_meta_tci *)msk,
599 key_layer.key_layer);
600
601 if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
602 offset = key_map[FLOW_PAY_EXT_META];
603 key = kdata + offset;
604 msk = mdata + offset;
605 nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
606 key_layer.key_layer_two);
607 nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
608 key_layer.key_layer_two);
609 }
610
/* Use the in_port from the -trk rule. The tc merge checks have already
 * ensured that the ingress netdevs are the same.
 */
614 port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
615 offset = key_map[FLOW_PAY_INPORT];
616 key = kdata + offset;
617 msk = mdata + offset;
618 err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
619 port_id, false, tun_type, NULL);
620 if (err)
621 goto ct_offload_err;
622 err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
623 port_id, true, tun_type, NULL);
624 if (err)
625 goto ct_offload_err;
626
/* The following part works on the assumption that previous checks have
 * already filtered out flows that have different values for the various
 * layers. Here we iterate through all three rules and merge their
 * respective masked values (the cared-about bits); the basic method is:
 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
 * final_mask = r1_mask | r2_mask | r3_mask
 * If none of the rules contains a match for a layer that is also fine; it
 * simply means that the layer is not present.
 */
636 if (!qinq_sup) {
637 for (i = 0; i < _CT_TYPE_MAX; i++) {
638 offset = key_map[FLOW_PAY_META_TCI];
639 key = kdata + offset;
640 msk = mdata + offset;
641 nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
642 (struct nfp_flower_meta_tci *)msk,
643 rules[i]);
644 }
645 }
646
647 if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
648 offset = key_map[FLOW_PAY_MAC_MPLS];
649 key = kdata + offset;
650 msk = mdata + offset;
651 for (i = 0; i < _CT_TYPE_MAX; i++) {
652 nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
653 (struct nfp_flower_mac_mpls *)msk,
654 rules[i]);
655 err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
656 (struct nfp_flower_mac_mpls *)msk,
657 rules[i], NULL);
658 if (err)
659 goto ct_offload_err;
660 }
661 }
662
663 if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
664 offset = key_map[FLOW_PAY_IPV4];
665 key = kdata + offset;
666 msk = mdata + offset;
667 for (i = 0; i < _CT_TYPE_MAX; i++) {
668 nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
669 (struct nfp_flower_ipv4 *)msk,
670 rules[i]);
671 }
672 }
673
674 if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
675 offset = key_map[FLOW_PAY_IPV6];
676 key = kdata + offset;
677 msk = mdata + offset;
678 for (i = 0; i < _CT_TYPE_MAX; i++) {
679 nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
680 (struct nfp_flower_ipv6 *)msk,
681 rules[i]);
682 }
683 }
684
685 if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
686 offset = key_map[FLOW_PAY_L4];
687 key = kdata + offset;
688 msk = mdata + offset;
689 for (i = 0; i < _CT_TYPE_MAX; i++) {
690 nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
691 (struct nfp_flower_tp_ports *)msk,
692 rules[i]);
693 }
694 }
695
696 if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
697 offset = key_map[FLOW_PAY_QINQ];
698 key = kdata + offset;
699 msk = mdata + offset;
700 for (i = 0; i < _CT_TYPE_MAX; i++) {
701 nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
702 (struct nfp_flower_vlan *)msk,
703 rules[i]);
704 }
705 }
706
707 if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
708 offset = key_map[FLOW_PAY_GRE];
709 key = kdata + offset;
710 msk = mdata + offset;
711 if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
712 struct nfp_flower_ipv6_gre_tun *gre_match;
713 struct nfp_ipv6_addr_entry *entry;
714 struct in6_addr *dst;
715
716 for (i = 0; i < _CT_TYPE_MAX; i++) {
717 nfp_flower_compile_ipv6_gre_tun((void *)key,
718 (void *)msk, rules[i]);
719 }
720 gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
721 dst = &gre_match->ipv6.dst;
722
723 entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
724 if (!entry) {
725 err = -ENOMEM;
726 goto ct_offload_err;
727 }
728
729 flow_pay->nfp_tun_ipv6 = entry;
730 } else {
731 __be32 dst;
732
733 for (i = 0; i < _CT_TYPE_MAX; i++) {
734 nfp_flower_compile_ipv4_gre_tun((void *)key,
735 (void *)msk, rules[i]);
736 }
737 dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;
738
739 /* Store the tunnel destination in the rule data.
740 * This must be present and be an exact match.
741 */
742 flow_pay->nfp_tun_ipv4_addr = dst;
743 nfp_tunnel_add_ipv4_off(priv->app, dst);
744 }
745 }
746
747 if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
748 key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
749 offset = key_map[FLOW_PAY_UDP_TUN];
750 key = kdata + offset;
751 msk = mdata + offset;
752 if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
753 struct nfp_flower_ipv6_udp_tun *udp_match;
754 struct nfp_ipv6_addr_entry *entry;
755 struct in6_addr *dst;
756
757 for (i = 0; i < _CT_TYPE_MAX; i++) {
758 nfp_flower_compile_ipv6_udp_tun((void *)key,
759 (void *)msk, rules[i]);
760 }
761 udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
762 dst = &udp_match->ipv6.dst;
763
764 entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
765 if (!entry) {
766 err = -ENOMEM;
767 goto ct_offload_err;
768 }
769
770 flow_pay->nfp_tun_ipv6 = entry;
771 } else {
772 __be32 dst;
773
774 for (i = 0; i < _CT_TYPE_MAX; i++) {
775 nfp_flower_compile_ipv4_udp_tun((void *)key,
776 (void *)msk, rules[i]);
777 }
778 dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
779
780 /* Store the tunnel destination in the rule data.
781 * This must be present and be an exact match.
782 */
783 flow_pay->nfp_tun_ipv4_addr = dst;
784 nfp_tunnel_add_ipv4_off(priv->app, dst);
785 }
786
787 if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
788 offset = key_map[FLOW_PAY_GENEVE_OPT];
789 key = kdata + offset;
790 msk = mdata + offset;
791 for (i = 0; i < _CT_TYPE_MAX; i++)
792 nfp_flower_compile_geneve_opt(key, msk, rules[i]);
793 }
794 }
795
796 /* Merge actions into flow_pay */
797 err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
798 if (err)
799 goto ct_offload_err;
800
/* Use the pointer address as the cookie, but set the last bit to 1.
 * This prevents the 'is_merge_flow' check from detecting this as an
 * already merged flow. It works because pointer alignment means that
 * the last bit of a pointer address will be 0.
 */
806 flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
807 err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
808 flow_pay, netdev, NULL);
809 if (err)
810 goto ct_offload_err;
811
812 if (nfp_netdev_is_nfp_repr(netdev))
813 port = nfp_port_from_netdev(netdev);
814
815 err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
816 nfp_flower_table_params);
817 if (err)
818 goto ct_release_offload_meta_err;
819
820 err = nfp_flower_xmit_flow(priv->app, flow_pay,
821 NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
822 if (err)
823 goto ct_remove_rhash_err;
824
825 m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
826 m_entry->flow_pay = flow_pay;
827
828 if (port)
829 port->tc_offload_cnt++;
830
831 return err;
832
833 ct_remove_rhash_err:
834 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
835 &flow_pay->fl_node,
836 nfp_flower_table_params));
837 ct_release_offload_meta_err:
838 nfp_modify_flow_metadata(priv->app, flow_pay);
839 ct_offload_err:
840 if (flow_pay->nfp_tun_ipv4_addr)
841 nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
842 if (flow_pay->nfp_tun_ipv6)
843 nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
844 kfree(flow_pay->action_data);
845 kfree(flow_pay->mask_data);
846 kfree(flow_pay->unmasked_data);
847 kfree(flow_pay);
848 return err;
849 }
850
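/* Remove a previously offloaded merged flow: release its metadata and
 * tunnel references, send a FLOW_DEL message if it reached hardware and
 * free the payload.
 */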
static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
	struct net_device *netdev)
853 {
854 struct nfp_flower_priv *priv = app->priv;
855 struct nfp_fl_payload *flow_pay;
856 struct nfp_port *port = NULL;
857 int err = 0;
858
859 if (nfp_netdev_is_nfp_repr(netdev))
860 port = nfp_port_from_netdev(netdev);
861
862 flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
863 if (!flow_pay)
864 return -ENOENT;
865
866 err = nfp_modify_flow_metadata(app, flow_pay);
867 if (err)
868 goto err_free_merge_flow;
869
870 if (flow_pay->nfp_tun_ipv4_addr)
871 nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);
872
873 if (flow_pay->nfp_tun_ipv6)
874 nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
875
876 if (!flow_pay->in_hw) {
877 err = 0;
878 goto err_free_merge_flow;
879 }
880
881 err = nfp_flower_xmit_flow(app, flow_pay,
882 NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
883
884 err_free_merge_flow:
885 nfp_flower_del_linked_merge_flows(app, flow_pay);
886 if (port)
887 port->tc_offload_cnt--;
888 kfree(flow_pay->action_data);
889 kfree(flow_pay->mask_data);
890 kfree(flow_pay->unmasked_data);
891 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
892 &flow_pay->fl_node,
893 nfp_flower_table_params));
894 kfree_rcu(flow_pay, rcu);
895 return err;
896 }
897
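/* Try to merge an nft flow with an existing pre-ct/post-ct pair. On success
 * the resulting nft merge entry is offloaded to the firmware and inserted
 * into the zone's nft_merge table.
 */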
static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
	struct nfp_fl_ct_flow_entry *nft_entry,
	struct nfp_fl_ct_tc_merge *tc_m_entry)
901 {
902 struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
903 struct nfp_fl_nft_tc_merge *nft_m_entry;
904 unsigned long new_cookie[3];
905 int err;
906
907 pre_ct_entry = tc_m_entry->pre_ct_parent;
908 post_ct_entry = tc_m_entry->post_ct_parent;
909
910 err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
911 if (err)
912 return err;
913
914 /* Check that the two tc flows are also compatible with
915 * the nft entry. No need to check the pre_ct and post_ct
916 * entries as that was already done during pre_merge.
917 * The nft entry does not have a netdev or chain populated, so
918 * skip this check.
919 */
920 err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
921 if (err)
922 return err;
923 err = nfp_ct_merge_check(post_ct_entry, nft_entry);
924 if (err)
925 return err;
926 err = nfp_ct_check_meta(post_ct_entry, nft_entry);
927 if (err)
928 return err;
929
/* Combine the tc_merge and nft cookies into the key for this entry. */
931 new_cookie[0] = tc_m_entry->cookie[0];
932 new_cookie[1] = tc_m_entry->cookie[1];
933 new_cookie[2] = nft_entry->cookie;
934 nft_m_entry = get_hashentry(&zt->nft_merge_tb,
935 &new_cookie,
936 nfp_nft_ct_merge_params,
937 sizeof(*nft_m_entry));
938
939 if (IS_ERR(nft_m_entry))
940 return PTR_ERR(nft_m_entry);
941
942 /* nft_m_entry already present, not merging again */
943 if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
944 return 0;
945
946 memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
947 nft_m_entry->zt = zt;
948 nft_m_entry->tc_m_parent = tc_m_entry;
949 nft_m_entry->nft_parent = nft_entry;
950 nft_m_entry->tc_flower_cookie = 0;
/* Copy the netdev from the pre_ct entry. The tc_m_entry was only created
 * if the netdevs were the same, so either of them can be used.
 */
954 nft_m_entry->netdev = pre_ct_entry->netdev;
955
956 /* Add this entry to the tc_m_list and nft_flow lists */
957 list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
958 list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
959
960 /* Generate offload structure and send to nfp */
961 err = nfp_fl_ct_add_offload(nft_m_entry);
962 if (err)
963 goto err_nft_ct_offload;
964
965 err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
966 nfp_nft_ct_merge_params);
967 if (err)
968 goto err_nft_ct_merge_insert;
969
970 zt->nft_merge_count++;
971
972 return err;
973
974 err_nft_ct_merge_insert:
975 nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
976 nft_m_entry->netdev);
977 err_nft_ct_offload:
978 list_del(&nft_m_entry->tc_merge_list);
979 list_del(&nft_m_entry->nft_flow_list);
980 kfree(nft_m_entry);
981 return err;
982 }
983
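/* Try to merge a pre-ct and a post-ct tc flow. A successful merge is stored
 * in the zone's tc_merge table and then combined with any nft flows already
 * known for the zone.
 */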
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
	struct nfp_fl_ct_flow_entry *ct_entry1,
	struct nfp_fl_ct_flow_entry *ct_entry2)
987 {
988 struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
989 struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
990 struct nfp_fl_ct_tc_merge *m_entry;
991 unsigned long new_cookie[2];
992 int err;
993
994 if (ct_entry1->type == CT_TYPE_PRE_CT) {
995 pre_ct_entry = ct_entry1;
996 post_ct_entry = ct_entry2;
997 } else {
998 post_ct_entry = ct_entry1;
999 pre_ct_entry = ct_entry2;
1000 }
1001
1002 if (post_ct_entry->netdev != pre_ct_entry->netdev)
1003 return -EINVAL;
1004 /* Checks that the chain_index of the filter matches the
1005 * chain_index of the GOTO action.
1006 */
1007 if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
1008 return -EINVAL;
1009
1010 err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
1011 if (err)
1012 return err;
1013
1014 new_cookie[0] = pre_ct_entry->cookie;
1015 new_cookie[1] = post_ct_entry->cookie;
1016 m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
1017 nfp_tc_ct_merge_params, sizeof(*m_entry));
1018 if (IS_ERR(m_entry))
1019 return PTR_ERR(m_entry);
1020
1021 /* m_entry already present, not merging again */
1022 if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
1023 return 0;
1024
1025 memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
1026 m_entry->zt = zt;
1027 m_entry->post_ct_parent = post_ct_entry;
1028 m_entry->pre_ct_parent = pre_ct_entry;
1029
1030 /* Add this entry to the pre_ct and post_ct lists */
1031 list_add(&m_entry->post_ct_list, &post_ct_entry->children);
1032 list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
1033 INIT_LIST_HEAD(&m_entry->children);
1034
1035 err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
1036 nfp_tc_ct_merge_params);
1037 if (err)
1038 goto err_ct_tc_merge_insert;
1039 zt->tc_merge_count++;
1040
1041 /* Merge with existing nft flows */
1042 list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
1043 list_node) {
1044 nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
1045 }
1046
1047 return 0;
1048
1049 err_ct_tc_merge_insert:
1050 list_del(&m_entry->post_ct_list);
1051 list_del(&m_entry->pre_ct_list);
1052 kfree(m_entry);
1053 return err;
1054 }
1055
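/* Look up, or allocate and initialise, the zone table entry for @zone. A
 * wildcarded zone is kept in priv->ct_zone_wc instead of the zone table.
 */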
1056 static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
	u16 zone, bool wildcarded)
1059 {
1060 struct nfp_fl_ct_zone_entry *zt;
1061 int err;
1062
1063 if (wildcarded && priv->ct_zone_wc)
1064 return priv->ct_zone_wc;
1065
1066 if (!wildcarded) {
1067 zt = get_hashentry(&priv->ct_zone_table, &zone,
1068 nfp_zone_table_params, sizeof(*zt));
1069
1070 /* If priv is set this is an existing entry, just return it */
1071 if (IS_ERR(zt) || zt->priv)
1072 return zt;
1073 } else {
1074 zt = kzalloc(sizeof(*zt), GFP_KERNEL);
1075 if (!zt)
1076 return ERR_PTR(-ENOMEM);
1077 }
1078
1079 zt->zone = zone;
1080 zt->priv = priv;
1081 zt->nft = NULL;
1082
/* Init the various hash tables and lists */
1084 INIT_LIST_HEAD(&zt->pre_ct_list);
1085 INIT_LIST_HEAD(&zt->post_ct_list);
1086 INIT_LIST_HEAD(&zt->nft_flows_list);
1087
1088 err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
1089 if (err)
1090 goto err_tc_merge_tb_init;
1091
1092 err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
1093 if (err)
1094 goto err_nft_merge_tb_init;
1095
1096 if (wildcarded) {
1097 priv->ct_zone_wc = zt;
1098 } else {
1099 err = rhashtable_insert_fast(&priv->ct_zone_table,
1100 &zt->hash_node,
1101 nfp_zone_table_params);
1102 if (err)
1103 goto err_zone_insert;
1104 }
1105
1106 return zt;
1107
1108 err_zone_insert:
1109 rhashtable_destroy(&zt->nft_merge_tb);
1110 err_nft_merge_tb_init:
1111 rhashtable_destroy(&zt->tc_merge_tb);
1112 err_tc_merge_tb_init:
1113 kfree(zt);
1114 return ERR_PTR(err);
1115 }
1116
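/* Allocate a ct flow entry for @flow, copying the match (fully duplicated
 * for nft flows) and action data, and add a cookie->entry mapping to the
 * ct map table.
 */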
1117 static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
	struct net_device *netdev,
	struct flow_cls_offload *flow,
	bool is_nft, struct netlink_ext_ack *extack)
1122 {
1123 struct nf_flow_match *nft_match = NULL;
1124 struct nfp_fl_ct_flow_entry *entry;
1125 struct nfp_fl_ct_map_entry *map;
1126 struct flow_action_entry *act;
1127 int err, i;
1128
1129 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1130 if (!entry)
1131 return ERR_PTR(-ENOMEM);
1132
1133 entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
1134 if (!entry->rule) {
1135 err = -ENOMEM;
1136 goto err_pre_ct_rule;
1137 }
1138
/* nft flows get destroyed after the callback returns, so a full copy
 * is needed instead of just a reference.
 */
1142 if (is_nft) {
1143 nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
1144 if (!nft_match) {
1145 err = -ENOMEM;
1146 goto err_pre_ct_act;
1147 }
1148 memcpy(&nft_match->dissector, flow->rule->match.dissector,
1149 sizeof(nft_match->dissector));
1150 memcpy(&nft_match->mask, flow->rule->match.mask,
1151 sizeof(nft_match->mask));
1152 memcpy(&nft_match->key, flow->rule->match.key,
1153 sizeof(nft_match->key));
1154 entry->rule->match.dissector = &nft_match->dissector;
1155 entry->rule->match.mask = &nft_match->mask;
1156 entry->rule->match.key = &nft_match->key;
1157 } else {
1158 entry->rule->match.dissector = flow->rule->match.dissector;
1159 entry->rule->match.mask = flow->rule->match.mask;
1160 entry->rule->match.key = flow->rule->match.key;
1161 }
1162
1163 entry->zt = zt;
1164 entry->netdev = netdev;
1165 entry->cookie = flow->cookie;
1166 entry->chain_index = flow->common.chain_index;
1167 entry->tun_offset = NFP_FL_CT_NO_TUN;
1168
/* Copy over the action data. Unfortunately we do not get a handle to the
 * original tcf_action data, and the flow objects get destroyed, so we
 * cannot simply save a pointer either and have to copy the data over.
 */
1174 entry->rule->action.num_entries = flow->rule->action.num_entries;
1175 flow_action_for_each(i, act, &flow->rule->action) {
1176 struct flow_action_entry *new_act;
1177
1178 new_act = &entry->rule->action.entries[i];
1179 memcpy(new_act, act, sizeof(struct flow_action_entry));
/* Tunnel encap is a special case; the tunnel info needs to be
 * allocated and copied.
 */
1183 if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
1184 struct ip_tunnel_info *tun = act->tunnel;
1185 size_t tun_size = sizeof(*tun) + tun->options_len;
1186
1187 new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
1188 if (!new_act->tunnel) {
1189 err = -ENOMEM;
1190 goto err_pre_ct_tun_cp;
1191 }
1192 entry->tun_offset = i;
1193 }
1194 }
1195
1196 INIT_LIST_HEAD(&entry->children);
1197
1198 /* Now add a ct map entry to flower-priv */
1199 map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
1200 nfp_ct_map_params, sizeof(*map));
1201 if (IS_ERR(map)) {
1202 NL_SET_ERR_MSG_MOD(extack,
1203 "offload error: ct map entry creation failed");
1204 err = -ENOMEM;
1205 goto err_ct_flow_insert;
1206 }
1207 map->cookie = flow->cookie;
1208 map->ct_entry = entry;
1209 err = rhashtable_insert_fast(&zt->priv->ct_map_table,
1210 &map->hash_node,
1211 nfp_ct_map_params);
1212 if (err) {
1213 NL_SET_ERR_MSG_MOD(extack,
1214 "offload error: ct map entry table add failed");
1215 goto err_map_insert;
1216 }
1217
1218 return entry;
1219
1220 err_map_insert:
1221 kfree(map);
1222 err_ct_flow_insert:
1223 if (entry->tun_offset != NFP_FL_CT_NO_TUN)
1224 kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
1225 err_pre_ct_tun_cp:
1226 kfree(nft_match);
1227 err_pre_ct_act:
1228 kfree(entry->rule);
1229 err_pre_ct_rule:
1230 kfree(entry);
1231 return ERR_PTR(err);
1232 }
1233
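/* Tear down an nft merge entry: remove its hardware offload if present,
 * take it off the nft_merge table and parent lists, then free it.
 */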
static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
1235 {
1236 struct nfp_fl_ct_zone_entry *zt;
1237 int err;
1238
1239 zt = m_entry->zt;
1240
1241 /* Flow is in HW, need to delete */
1242 if (m_entry->tc_flower_cookie) {
1243 err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
1244 m_entry->netdev);
1245 if (err)
1246 return;
1247 }
1248
1249 WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
1250 &m_entry->hash_node,
1251 nfp_nft_ct_merge_params));
1252 zt->nft_merge_count--;
1253 list_del(&m_entry->tc_merge_list);
1254 list_del(&m_entry->nft_flow_list);
1255
1256 kfree(m_entry);
1257 }
1258
static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
1260 {
1261 struct nfp_fl_nft_tc_merge *m_entry, *tmp;
1262
/* These entries are part of two lists: one is a list of nft_entries
 * and the other a list of tc_merge structures. Iterate through the
 * relevant list and clean up the entries.
 */
1267
1268 if (is_nft_flow) {
/* Need to iterate through the list of nft_flow entries */
1270 struct nfp_fl_ct_flow_entry *ct_entry = entry;
1271
1272 list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
1273 nft_flow_list) {
1274 cleanup_nft_merge_entry(m_entry);
1275 }
1276 } else {
/* Need to iterate through the list of tc_merged_flow entries */
1278 struct nfp_fl_ct_tc_merge *ct_entry = entry;
1279
1280 list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
1281 tc_merge_list) {
1282 cleanup_nft_merge_entry(m_entry);
1283 }
1284 }
1285 }
1286
static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
1288 {
1289 struct nfp_fl_ct_zone_entry *zt;
1290 int err;
1291
1292 zt = m_ent->zt;
1293 err = rhashtable_remove_fast(&zt->tc_merge_tb,
1294 &m_ent->hash_node,
1295 nfp_tc_ct_merge_params);
1296 if (err)
1297 pr_warn("WARNING: could not remove merge_entry from hashtable\n");
1298 zt->tc_merge_count--;
1299 list_del(&m_ent->post_ct_list);
1300 list_del(&m_ent->pre_ct_list);
1301
1302 if (!list_empty(&m_ent->children))
1303 nfp_free_nft_merge_children(m_ent, false);
1304 kfree(m_ent);
1305 }
1306
static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
1308 {
1309 struct nfp_fl_ct_tc_merge *m_ent, *tmp;
1310
1311 switch (entry->type) {
1312 case CT_TYPE_PRE_CT:
1313 list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
1314 nfp_del_tc_merge_entry(m_ent);
1315 }
1316 break;
1317 case CT_TYPE_POST_CT:
1318 list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
1319 nfp_del_tc_merge_entry(m_ent);
1320 }
1321 break;
1322 default:
1323 break;
1324 }
1325 }
1326
void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
1328 {
1329 list_del(&entry->list_node);
1330
1331 if (!list_empty(&entry->children)) {
1332 if (entry->type == CT_TYPE_NFT)
1333 nfp_free_nft_merge_children(entry, true);
1334 else
1335 nfp_free_tc_merge_children(entry);
1336 }
1337
1338 if (entry->tun_offset != NFP_FL_CT_NO_TUN)
1339 kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
1340
1341 if (entry->type == CT_TYPE_NFT) {
1342 struct nf_flow_match *nft_match;
1343
1344 nft_match = container_of(entry->rule->match.dissector,
1345 struct nf_flow_match, dissector);
1346 kfree(nft_match);
1347 }
1348
1349 kfree(entry->rule);
1350 kfree(entry);
1351 }
1352
static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
	enum flow_action_id act_id)
1355 {
1356 struct flow_action_entry *act = NULL;
1357 int i;
1358
1359 flow_action_for_each(i, act, &rule->action) {
1360 if (act->id == act_id)
1361 return act;
1362 }
1363 return NULL;
1364 }
1365
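/* Try to merge @ct_entry1 with every entry on the opposite (pre-ct or
 * post-ct) list of the source zone, storing results in the destination zone.
 */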
1366 static void
nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
	struct nfp_fl_ct_zone_entry *zt_src,
	struct nfp_fl_ct_zone_entry *zt_dst)
1370 {
1371 struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
1372 struct list_head *ct_list;
1373
1374 if (ct_entry1->type == CT_TYPE_PRE_CT)
1375 ct_list = &zt_src->post_ct_list;
1376 else if (ct_entry1->type == CT_TYPE_POST_CT)
1377 ct_list = &zt_src->pre_ct_list;
1378 else
1379 return;
1380
1381 list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
1382 list_node) {
1383 nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
1384 }
1385 }
1386
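/* Walk the zone's tc_merge table and try to merge @nft_entry with each
 * existing pre-ct/post-ct pair.
 */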
1387 static void
nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
	struct nfp_fl_ct_zone_entry *zt)
1390 {
1391 struct nfp_fl_ct_tc_merge *tc_merge_entry;
1392 struct rhashtable_iter iter;
1393
1394 rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
1395 rhashtable_walk_start(&iter);
1396 while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
1397 if (IS_ERR(tc_merge_entry))
1398 continue;
1399 rhashtable_walk_stop(&iter);
1400 nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
1401 rhashtable_walk_start(&iter);
1402 }
1403 rhashtable_walk_stop(&iter);
1404 rhashtable_walk_exit(&iter);
1405 }
1406
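/* Handle a tc flow classified as the pre-ct half of a conntrack offload:
 * find or create the zone table for the ct action's zone, register the nft
 * callback if needed, add the flow to the zone's pre_ct_list and try to
 * merge it with existing post-ct entries, including the wildcard zone.
 */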
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
	struct net_device *netdev,
	struct flow_cls_offload *flow,
	struct netlink_ext_ack *extack)
1411 {
1412 struct flow_action_entry *ct_act, *ct_goto;
1413 struct nfp_fl_ct_flow_entry *ct_entry;
1414 struct nfp_fl_ct_zone_entry *zt;
1415 int err;
1416
1417 ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
1418 if (!ct_act) {
1419 NL_SET_ERR_MSG_MOD(extack,
1420 "unsupported offload: Conntrack action empty in conntrack offload");
1421 return -EOPNOTSUPP;
1422 }
1423
1424 ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
1425 if (!ct_goto) {
1426 NL_SET_ERR_MSG_MOD(extack,
1427 "unsupported offload: Conntrack requires ACTION_GOTO");
1428 return -EOPNOTSUPP;
1429 }
1430
1431 zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
1432 if (IS_ERR(zt)) {
1433 NL_SET_ERR_MSG_MOD(extack,
1434 "offload error: Could not create zone table entry");
1435 return PTR_ERR(zt);
1436 }
1437
1438 if (!zt->nft) {
1439 zt->nft = ct_act->ct.flow_table;
1440 err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
1441 if (err) {
1442 NL_SET_ERR_MSG_MOD(extack,
1443 "offload error: Could not register nft_callback");
1444 return err;
1445 }
1446 }
1447
1448 /* Add entry to pre_ct_list */
1449 ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
1450 if (IS_ERR(ct_entry))
1451 return PTR_ERR(ct_entry);
1452 ct_entry->type = CT_TYPE_PRE_CT;
1453 ct_entry->chain_index = ct_goto->chain_index;
1454 list_add(&ct_entry->list_node, &zt->pre_ct_list);
1455 zt->pre_ct_count++;
1456
1457 nfp_ct_merge_tc_entries(ct_entry, zt, zt);
1458
1459 /* Need to check and merge with tables in the wc_zone as well */
1460 if (priv->ct_zone_wc)
1461 nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
1462
1463 return 0;
1464 }
1465
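/* Handle a tc flow that matches on conntrack state: find or create the zone
 * table (wildcarded if ct_zone is unmasked), add the flow to the
 * post_ct_list and try to merge it with existing pre-ct entries.
 */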
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
	struct net_device *netdev,
	struct flow_cls_offload *flow,
	struct netlink_ext_ack *extack)
1470 {
1471 struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
1472 struct nfp_fl_ct_flow_entry *ct_entry;
1473 struct nfp_fl_ct_zone_entry *zt;
1474 bool wildcarded = false;
1475 struct flow_match_ct ct;
1476
1477 flow_rule_match_ct(rule, &ct);
1478 if (!ct.mask->ct_zone) {
1479 wildcarded = true;
1480 } else if (ct.mask->ct_zone != U16_MAX) {
1481 NL_SET_ERR_MSG_MOD(extack,
1482 "unsupported offload: partially wildcarded ct_zone is not supported");
1483 return -EOPNOTSUPP;
1484 }
1485
1486 zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
1487 if (IS_ERR(zt)) {
1488 NL_SET_ERR_MSG_MOD(extack,
1489 "offload error: Could not create zone table entry");
1490 return PTR_ERR(zt);
1491 }
1492
1493 /* Add entry to post_ct_list */
1494 ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
1495 if (IS_ERR(ct_entry))
1496 return PTR_ERR(ct_entry);
1497
1498 ct_entry->type = CT_TYPE_POST_CT;
1499 ct_entry->chain_index = flow->common.chain_index;
1500 list_add(&ct_entry->list_node, &zt->post_ct_list);
1501 zt->post_ct_count++;
1502
1503 if (wildcarded) {
/* Iterate through all non-empty zone tables, look for merges with
 * pre_ct entries and merge them.
 */
1507 struct rhashtable_iter iter;
1508 struct nfp_fl_ct_zone_entry *zone_table;
1509
1510 rhashtable_walk_enter(&priv->ct_zone_table, &iter);
1511 rhashtable_walk_start(&iter);
1512 while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
1513 if (IS_ERR(zone_table))
1514 continue;
1515 rhashtable_walk_stop(&iter);
1516 nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
1517 rhashtable_walk_start(&iter);
1518 }
1519 rhashtable_walk_stop(&iter);
1520 rhashtable_walk_exit(&iter);
1521 } else {
1522 nfp_ct_merge_tc_entries(ct_entry, zt, zt);
1523 }
1524
1525 return 0;
1526 }
1527
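/* Accumulate the hardware stats of one nft merge flow into the caller's
 * counters, propagate them to the cached stats of the parent flows and
 * clear the driver's per-context counters.
 */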
1528 static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
	enum ct_entry_type type, u64 *m_pkts,
	u64 *m_bytes, u64 *m_used)
1532 {
1533 struct nfp_flower_priv *priv = nft_merge->zt->priv;
1534 struct nfp_fl_payload *nfp_flow;
1535 u32 ctx_id;
1536
1537 nfp_flow = nft_merge->flow_pay;
1538 if (!nfp_flow)
1539 return;
1540
1541 ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1542 *m_pkts += priv->stats[ctx_id].pkts;
1543 *m_bytes += priv->stats[ctx_id].bytes;
1544 *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
1545
/* If the request is for a sub_flow that is part of a tunnel merged
 * flow, then update the stats from the tunnel merged flows first.
 */
1549 if (!list_empty(&nfp_flow->linked_flows))
1550 nfp_flower_update_merge_stats(priv->app, nfp_flow);
1551
1552 if (type != CT_TYPE_NFT) {
1553 /* Update nft cached stats */
1554 flow_stats_update(&nft_merge->nft_parent->stats,
1555 priv->stats[ctx_id].bytes,
1556 priv->stats[ctx_id].pkts,
1557 0, priv->stats[ctx_id].used,
1558 FLOW_ACTION_HW_STATS_DELAYED);
1559 } else {
1560 /* Update pre_ct cached stats */
1561 flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
1562 priv->stats[ctx_id].bytes,
1563 priv->stats[ctx_id].pkts,
1564 0, priv->stats[ctx_id].used,
1565 FLOW_ACTION_HW_STATS_DELAYED);
1566 /* Update post_ct cached stats */
1567 flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
1568 priv->stats[ctx_id].bytes,
1569 priv->stats[ctx_id].pkts,
1570 0, priv->stats[ctx_id].used,
1571 FLOW_ACTION_HW_STATS_DELAYED);
1572 }
1573 /* Reset stats from the nfp */
1574 priv->stats[ctx_id].pkts = 0;
1575 priv->stats[ctx_id].bytes = 0;
1576 }
1577
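/* Gather stats for a ct flow entry by walking all merge flows derived from
 * it, update the cached stats of its partner entries and report the total
 * through the original stats request.
 */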
int nfp_fl_ct_stats(struct flow_cls_offload *flow,
	struct nfp_fl_ct_map_entry *ct_map_ent)
1580 {
1581 struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
1582 struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
1583 struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
1584
1585 u64 pkts = 0, bytes = 0, used = 0;
1586 u64 m_pkts, m_bytes, m_used;
1587
1588 spin_lock_bh(&ct_entry->zt->priv->stats_lock);
1589
1590 if (ct_entry->type == CT_TYPE_PRE_CT) {
1591 /* Iterate tc_merge entries associated with this flow */
1592 list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
1593 pre_ct_list) {
1594 m_pkts = 0;
1595 m_bytes = 0;
1596 m_used = 0;
1597 /* Iterate nft_merge entries associated with this tc_merge flow */
1598 list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
1599 tc_merge_list) {
1600 nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
1601 &m_pkts, &m_bytes, &m_used);
1602 }
1603 pkts += m_pkts;
1604 bytes += m_bytes;
1605 used = max_t(u64, used, m_used);
1606 /* Update post_ct partner */
1607 flow_stats_update(&tc_merge->post_ct_parent->stats,
1608 m_bytes, m_pkts, 0, m_used,
1609 FLOW_ACTION_HW_STATS_DELAYED);
1610 }
1611 } else if (ct_entry->type == CT_TYPE_POST_CT) {
1612 /* Iterate tc_merge entries associated with this flow */
1613 list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
1614 post_ct_list) {
1615 m_pkts = 0;
1616 m_bytes = 0;
1617 m_used = 0;
1618 /* Iterate nft_merge entries associated with this tc_merge flow */
1619 list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
1620 tc_merge_list) {
1621 nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
1622 &m_pkts, &m_bytes, &m_used);
1623 }
1624 pkts += m_pkts;
1625 bytes += m_bytes;
1626 used = max_t(u64, used, m_used);
1627 /* Update pre_ct partner */
1628 flow_stats_update(&tc_merge->pre_ct_parent->stats,
1629 m_bytes, m_pkts, 0, m_used,
1630 FLOW_ACTION_HW_STATS_DELAYED);
1631 }
1632 } else {
1633 /* Iterate nft_merge entries associated with this nft flow */
1634 list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
1635 nft_flow_list) {
1636 nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
1637 &pkts, &bytes, &used);
1638 }
1639 }
1640
1641 /* Add stats from this request to stats potentially cached by
1642 * previous requests.
1643 */
1644 flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
1645 FLOW_ACTION_HW_STATS_DELAYED);
1646 /* Finally update the flow stats from the original stats request */
1647 flow_stats_update(&flow->stats, ct_entry->stats.bytes,
1648 ct_entry->stats.pkts, 0,
1649 ct_entry->stats.lastused,
1650 FLOW_ACTION_HW_STATS_DELAYED);
/* Stats have been synced to the original flow, so the cache can now
 * be cleared.
 */
1654 ct_entry->stats.pkts = 0;
1655 ct_entry->stats.bytes = 0;
1656 spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
1657
1658 return 0;
1659 }
1660
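/* Dispatch an nft flow offload request for this zone: add, destroy or
 * report stats for the corresponding ct flow entry.
 */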
1661 static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
1663 {
1664 struct nfp_fl_ct_map_entry *ct_map_ent;
1665 struct nfp_fl_ct_flow_entry *ct_entry;
1666 struct netlink_ext_ack *extack = NULL;
1667
1668 extack = flow->common.extack;
1669 switch (flow->command) {
1670 case FLOW_CLS_REPLACE:
1671 /* Netfilter can request offload multiple times for the same
1672 * flow - protect against adding duplicates.
1673 */
1674 ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
1675 nfp_ct_map_params);
1676 if (!ct_map_ent) {
1677 ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
1678 if (IS_ERR(ct_entry))
1679 return PTR_ERR(ct_entry);
1680 ct_entry->type = CT_TYPE_NFT;
1681 list_add(&ct_entry->list_node, &zt->nft_flows_list);
1682 zt->nft_flows_count++;
1683 nfp_ct_merge_nft_with_tc(ct_entry, zt);
1684 }
1685 return 0;
1686 case FLOW_CLS_DESTROY:
1687 ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
1688 nfp_ct_map_params);
1689 return nfp_fl_ct_del_flow(ct_map_ent);
1690 case FLOW_CLS_STATS:
1691 ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
1692 nfp_ct_map_params);
1693 if (ct_map_ent)
1694 return nfp_fl_ct_stats(flow, ct_map_ent);
1695 break;
1696 default:
1697 break;
1698 }
1699 return -EINVAL;
1700 }
1701
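/* Callback registered with the nf_flowtable. Takes the flower lock in a
 * trylock loop, bailing out if zt->nft has been cleared (to avoid a
 * deadlock during teardown), then forwards CLSFLOWER requests to
 * nfp_fl_ct_offload_nft_flow().
 */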
int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
1703 {
1704 struct flow_cls_offload *flow = type_data;
1705 struct nfp_fl_ct_zone_entry *zt = cb_priv;
1706 int err = -EOPNOTSUPP;
1707
1708 switch (type) {
1709 case TC_SETUP_CLSFLOWER:
1710 while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
1711 if (!zt->nft) /* avoid deadlock */
1712 return err;
1713 msleep(20);
1714 }
1715 err = nfp_fl_ct_offload_nft_flow(zt, flow);
1716 mutex_unlock(&zt->priv->nfp_fl_lock);
1717 break;
1718 default:
1719 return -EOPNOTSUPP;
1720 }
1721 return err;
1722 }
1723
1724 static void
nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
1726 {
1727 struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
1728 struct nfp_fl_ct_map_entry *ct_map_ent;
1729
1730 list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
1731 list_node) {
1732 ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
1733 &nft_entry->cookie,
1734 nfp_ct_map_params);
1735 nfp_fl_ct_del_flow(ct_map_ent);
1736 }
1737 }
1738
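/* Remove a ct flow entry and its map entry. For pre-ct entries, also
 * unregister the nft callback and flush the zone's nft flows once the last
 * pre-ct entry is gone.
 */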
int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
1740 {
1741 struct nfp_fl_ct_flow_entry *ct_entry;
1742 struct nfp_fl_ct_zone_entry *zt;
1743 struct rhashtable *m_table;
1744 struct nf_flowtable *nft;
1745
1746 if (!ct_map_ent)
1747 return -ENOENT;
1748
1749 zt = ct_map_ent->ct_entry->zt;
1750 ct_entry = ct_map_ent->ct_entry;
1751 m_table = &zt->priv->ct_map_table;
1752
1753 switch (ct_entry->type) {
1754 case CT_TYPE_PRE_CT:
1755 zt->pre_ct_count--;
1756 rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
1757 nfp_ct_map_params);
1758 nfp_fl_ct_clean_flow_entry(ct_entry);
1759 kfree(ct_map_ent);
1760
1761 if (!zt->pre_ct_count && zt->nft) {
1762 nft = zt->nft;
1763 zt->nft = NULL; /* avoid deadlock */
1764 nf_flow_table_offload_del_cb(nft,
1765 nfp_fl_ct_handle_nft_flow,
1766 zt);
1767 nfp_fl_ct_clean_nft_entries(zt);
1768 }
1769 break;
1770 case CT_TYPE_POST_CT:
1771 zt->post_ct_count--;
1772 rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
1773 nfp_ct_map_params);
1774 nfp_fl_ct_clean_flow_entry(ct_entry);
1775 kfree(ct_map_ent);
1776 break;
1777 case CT_TYPE_NFT:
1778 zt->nft_flows_count--;
1779 rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
1780 nfp_ct_map_params);
1781 nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
1782 kfree(ct_map_ent);
1783 break;
1784 default:
1785 break;
1786 }
1787
1788 return 0;
1789 }
1790