// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

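/* The tc "ct" action sends packets through netfilter conntrack and,
 * optionally, NAT, and caches established connections in a per-zone
 * nf_flowtable so that later packets can bypass conntrack, either in
 * software (tcf_ct_flow_table_lookup()) or in hardware via
 * NF_FLOWTABLE_HW_OFFLOAD.
 *
 * Illustrative usage with iproute2 (assuming flower ct_state support;
 * device names and chain numbers below are examples only):
 *
 *   tc filter add dev eth0 ingress prio 1 proto ip flower \
 *           ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *           ct_state +trk+est action mirred egress redirect dev eth1
 */
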
static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning NAT for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

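/* Get (or create) the nf_flowtable for params->zone. Tables are shared by
 * all ct action instances in the same zone via zones_ht and are reference
 * counted; zones_mutex serializes the lookup-or-create.
 */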
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

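/* Deferred (RCU) teardown of a zone flow table: free the flowtable and
 * drop any flow_block callbacks that drivers left registered on it.
 */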
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

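/* Offload an established connection into the zone flow table. The
 * IPS_OFFLOAD_BIT guarantees a single offload attempt per connection;
 * for TCP, be liberal with out-of-window packets, since the flow table
 * bypasses conntrack's window tracking.
 */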
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

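/* Only established TCP (in TCP_CONNTRACK_ESTABLISHED state) and UDP
 * connections are candidates for flow table offload; connections with
 * helpers or sequence adjustment still need per-packet conntrack.
 */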
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

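/* Build a flow table lookup tuple from the packet headers. Fragments,
 * IPv4 options (thoff != sizeof(struct iphdr)), non-TCP/UDP protocols
 * and packets whose TTL/hop limit is about to expire are not eligible.
 */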
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

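/* Software fast path: look the packet up in the zone flow table and, on a
 * hit, attach the cached conntrack entry to the skb so the regular
 * conntrack lookup can be skipped. TCP FIN/RST tears the flow down and
 * falls back to the slow path.
 */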
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

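/* Defragment IPv4/IPv6 packets before conntrack sees them. The qdisc cb
 * is saved and restored around ip_defrag()/nf_ct_frag6_gather() since
 * both reuse skb->cb; -EINPROGRESS means the fragment was queued and the
 * caller must stop processing this skb.
 */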
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

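/* Apply the action's NAT configuration to the packet. For established
 * connections the manip type is derived from the connection's NAT status
 * and the packet direction; for new connections it follows
 * TCA_CT_ACT_NAT_SRC/_DST. When both SNAT and DNAT apply, ct_nat_execute()
 * runs twice, once per manip type.
 */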
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP; /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

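/* Datapath entry point. In order: handle TCA_CT_ACT_CLEAR, defragment,
 * trim lower-layer padding, look up the flow table / run conntrack,
 * apply NAT, and on commit set mark/labels and confirm the connection.
 * Offloadable connections are then fed to the zone flow table.
 */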
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

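/* Translate the TCA_CT_NAT_* netlink attributes into an nf_nat_range2.
 * A missing MAX attribute makes the range a single address/port.
 */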
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

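/* Parse the remaining netlink attributes into tcf_ct_params and allocate
 * a conntrack template carrying the zone, which the datapath attaches to
 * packets before nf_conntrack_in().
 */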
static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

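/* Create or replace a ct action instance. Parameters are swapped in
 * RCU-style under tcf_lock; old parameters (and their zone flow table
 * reference) are released from an RCU callback.
 */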
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		= "ct",
	.id		= TCA_ID_CT,
	.owner		= THIS_MODULE,
	.act		= tcf_ct_act,
	.dump		= tcf_ct_dump,
	.init		= tcf_ct_init,
	.cleanup	= tcf_ct_cleanup,
	.walk		= tcf_ct_walker,
	.lookup		= tcf_ct_search,
	.stats_update	= tcf_stats_update,
	.size		= sizeof(struct tcf_ct),
};

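/* Request the full 128-bit connlabel area once per netns; if that fails,
 * tn->labels stays false and TCA_CT_LABELS is rejected at action create
 * time in tcf_ct_fill_params().
 */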
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");