1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Connection state tracking for netfilter. This is separated from,
3 but required by, the NAT layer; it can also be used by an iptables
4 extension. */
5
6 /* (C) 1999-2001 Paul `Rusty' Russell
7 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
8 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
9 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/proc_fs.h>
20 #include <linux/vmalloc.h>
21 #include <linux/stddef.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/siphash.h>
25 #include <linux/err.h>
26 #include <linux/percpu.h>
27 #include <linux/moduleparam.h>
28 #include <linux/notifier.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/socket.h>
32 #include <linux/mm.h>
33 #include <linux/nsproxy.h>
34 #include <linux/rculist_nulls.h>
35 #include <trace/hooks/net.h>
36
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_l4proto.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_core.h>
43 #include <net/netfilter/nf_conntrack_extend.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_ecache.h>
46 #include <net/netfilter/nf_conntrack_zones.h>
47 #include <net/netfilter/nf_conntrack_timestamp.h>
48 #include <net/netfilter/nf_conntrack_timeout.h>
49 #include <net/netfilter/nf_conntrack_labels.h>
50 #include <net/netfilter/nf_conntrack_synproxy.h>
51 #include <net/netfilter/nf_nat.h>
52 #include <net/netfilter/nf_nat_helper.h>
53 #include <net/netns/hash.h>
54 #include <net/ip.h>
55
56 #include "nf_internals.h"
57
58 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
59 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
60
61 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
62 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
63
64 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
65 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
66
67 struct conntrack_gc_work {
68 struct delayed_work dwork;
69 u32 next_bucket;
70 u32 avg_timeout;
71 u32 count;
72 u32 start_time;
73 bool exiting;
74 bool early_drop;
75 };
76
77 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
78 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
79 static __read_mostly bool nf_conntrack_locks_all;
80
81 /* serialize hash resizes and nf_ct_iterate_cleanup */
82 static DEFINE_MUTEX(nf_conntrack_mutex);
83
84 #define GC_SCAN_INTERVAL_MAX (60ul * HZ)
85 #define GC_SCAN_INTERVAL_MIN (1ul * HZ)
86
87 /* clamp timeouts to this value (TCP unacked) */
88 #define GC_SCAN_INTERVAL_CLAMP (300ul * HZ)
89
90 /* Initial bias pretending we have 100 entries at the upper bound so we don't
91  * wake up often just because we have three entries with a 1s timeout while still
92  * allowing non-idle machines to wake up more often when needed.
93 */
94 #define GC_SCAN_INITIAL_COUNT 100
95 #define GC_SCAN_INTERVAL_INIT GC_SCAN_INTERVAL_MAX
96
97 #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
98 #define GC_SCAN_EXPIRED_MAX (64000u / HZ)
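/* A single gc_worker() pass therefore runs for roughly GC_SCAN_MAX_DURATION
 * (~10ms) and re-schedules itself early once it has reclaimed
 * GC_SCAN_EXPIRED_MAX expired entries (64000u / HZ, e.g. 64 entries on an
 * HZ=1000 build).
 */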
99
100 #define MIN_CHAINLEN 50u
101 #define MAX_CHAINLEN (80u - MIN_CHAINLEN)
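/* The insertion paths draw a per-attempt limit of
 * MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN) hash-chain entries, i.e. a
 * randomized bound in roughly the 50..80 range, before bailing out with
 * "chaintoolong".
 */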
102
103 static struct conntrack_gc_work conntrack_gc_work;
104
105 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
106 {
107 /* 1) Acquire the lock */
108 spin_lock(lock);
109
110 /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
111 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
112 */
113 if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
114 return;
115
116 /* fast path failed, unlock */
117 spin_unlock(lock);
118
119 /* Slow path 1) get global lock */
120 spin_lock(&nf_conntrack_locks_all_lock);
121
122 /* Slow path 2) get the lock we want */
123 spin_lock(lock);
124
125 /* Slow path 3) release the global lock */
126 spin_unlock(&nf_conntrack_locks_all_lock);
127 }
128 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
129
130 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
131 {
132 h1 %= CONNTRACK_LOCKS;
133 h2 %= CONNTRACK_LOCKS;
134 spin_unlock(&nf_conntrack_locks[h1]);
135 if (h1 != h2)
136 spin_unlock(&nf_conntrack_locks[h2]);
137 }
138
139 /* return true if we need to recompute hashes (in case hash table was resized) */
140 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
141 unsigned int h2, unsigned int sequence)
142 {
143 h1 %= CONNTRACK_LOCKS;
144 h2 %= CONNTRACK_LOCKS;
145 if (h1 <= h2) {
146 nf_conntrack_lock(&nf_conntrack_locks[h1]);
147 if (h1 != h2)
148 spin_lock_nested(&nf_conntrack_locks[h2],
149 SINGLE_DEPTH_NESTING);
150 } else {
151 nf_conntrack_lock(&nf_conntrack_locks[h2]);
152 spin_lock_nested(&nf_conntrack_locks[h1],
153 SINGLE_DEPTH_NESTING);
154 }
155 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
156 nf_conntrack_double_unlock(h1, h2);
157 return true;
158 }
159 return false;
160 }
161
162 static void nf_conntrack_all_lock(void)
163 __acquires(&nf_conntrack_locks_all_lock)
164 {
165 int i;
166
167 spin_lock(&nf_conntrack_locks_all_lock);
168
169 	/* For nf_conntrack_locks_all, only the latest time when another
170 	 * CPU will see an update is controlled by the "release" of the
171 	 * spin_lock below.
172 	 * The earliest time is not controlled, and thus KCSAN could detect
173 	 * a race when nf_conntrack_lock() reads the variable.
174 * WRITE_ONCE() is used to ensure the compiler will not
175 * optimize the write.
176 */
177 WRITE_ONCE(nf_conntrack_locks_all, true);
178
179 for (i = 0; i < CONNTRACK_LOCKS; i++) {
180 spin_lock(&nf_conntrack_locks[i]);
181
182 /* This spin_unlock provides the "release" to ensure that
183 * nf_conntrack_locks_all==true is visible to everyone that
184 * acquired spin_lock(&nf_conntrack_locks[]).
185 */
186 spin_unlock(&nf_conntrack_locks[i]);
187 }
188 }
189
190 static void nf_conntrack_all_unlock(void)
191 __releases(&nf_conntrack_locks_all_lock)
192 {
193 /* All prior stores must be complete before we clear
194 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
195 * might observe the false value but not the entire
196 * critical section.
197 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
198 */
199 smp_store_release(&nf_conntrack_locks_all, false);
200 spin_unlock(&nf_conntrack_locks_all_lock);
201 }
202
203 unsigned int nf_conntrack_htable_size __read_mostly;
204 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
205
206 unsigned int nf_conntrack_max __read_mostly;
207 EXPORT_SYMBOL_GPL(nf_conntrack_max);
208 seqcount_spinlock_t nf_conntrack_generation __read_mostly;
209 static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
210
211 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
212 unsigned int zoneid,
213 const struct net *net)
214 {
215 struct {
216 struct nf_conntrack_man src;
217 union nf_inet_addr dst_addr;
218 unsigned int zone;
219 u32 net_mix;
220 u16 dport;
221 u16 proto;
222 } __aligned(SIPHASH_ALIGNMENT) combined;
223
224 get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
225
226 memset(&combined, 0, sizeof(combined));
227
228 /* The direction must be ignored, so handle usable members manually. */
229 combined.src = tuple->src;
230 combined.dst_addr = tuple->dst.u3;
231 combined.zone = zoneid;
232 combined.net_mix = net_hash_mix(net);
233 combined.dport = (__force __u16)tuple->dst.u.all;
234 combined.proto = tuple->dst.protonum;
235
236 return (u32)siphash(&combined, sizeof(combined), &nf_conntrack_hash_rnd);
237 }
238
239 static u32 scale_hash(u32 hash)
240 {
241 return reciprocal_scale(hash, nf_conntrack_htable_size);
242 }
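/* reciprocal_scale() maps the full 32-bit hash onto
 * [0, nf_conntrack_htable_size) with a multiply-and-shift instead of a
 * modulo, so the table size does not need to be a power of two.
 */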
243
244 static u32 __hash_conntrack(const struct net *net,
245 const struct nf_conntrack_tuple *tuple,
246 unsigned int zoneid,
247 unsigned int size)
248 {
249 return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
250 }
251
252 static u32 hash_conntrack(const struct net *net,
253 const struct nf_conntrack_tuple *tuple,
254 unsigned int zoneid)
255 {
256 return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
257 }
258
259 static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
260 unsigned int dataoff,
261 struct nf_conntrack_tuple *tuple)
262 { struct {
263 __be16 sport;
264 __be16 dport;
265 } _inet_hdr, *inet_hdr;
266
267 /* Actually only need first 4 bytes to get ports. */
268 inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
269 if (!inet_hdr)
270 return false;
271
272 tuple->src.u.udp.port = inet_hdr->sport;
273 tuple->dst.u.udp.port = inet_hdr->dport;
274 return true;
275 }
276
277 static bool
278 nf_ct_get_tuple(const struct sk_buff *skb,
279 unsigned int nhoff,
280 unsigned int dataoff,
281 u_int16_t l3num,
282 u_int8_t protonum,
283 struct net *net,
284 struct nf_conntrack_tuple *tuple)
285 {
286 unsigned int size;
287 const __be32 *ap;
288 __be32 _addrs[8];
289
290 memset(tuple, 0, sizeof(*tuple));
291
292 tuple->src.l3num = l3num;
293 switch (l3num) {
294 case NFPROTO_IPV4:
295 nhoff += offsetof(struct iphdr, saddr);
296 size = 2 * sizeof(__be32);
297 break;
298 case NFPROTO_IPV6:
299 nhoff += offsetof(struct ipv6hdr, saddr);
300 size = sizeof(_addrs);
301 break;
302 default:
303 return true;
304 }
305
306 ap = skb_header_pointer(skb, nhoff, size, _addrs);
307 if (!ap)
308 return false;
309
310 switch (l3num) {
311 case NFPROTO_IPV4:
312 tuple->src.u3.ip = ap[0];
313 tuple->dst.u3.ip = ap[1];
314 break;
315 case NFPROTO_IPV6:
316 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
317 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
318 break;
319 }
320
321 tuple->dst.protonum = protonum;
322 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
323
324 switch (protonum) {
325 #if IS_ENABLED(CONFIG_IPV6)
326 case IPPROTO_ICMPV6:
327 return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
328 #endif
329 case IPPROTO_ICMP:
330 return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
331 #ifdef CONFIG_NF_CT_PROTO_GRE
332 case IPPROTO_GRE:
333 return gre_pkt_to_tuple(skb, dataoff, net, tuple);
334 #endif
335 case IPPROTO_TCP:
336 case IPPROTO_UDP: /* fallthrough */
337 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
338 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
339 case IPPROTO_UDPLITE:
340 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
341 #endif
342 #ifdef CONFIG_NF_CT_PROTO_SCTP
343 case IPPROTO_SCTP:
344 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
345 #endif
346 #ifdef CONFIG_NF_CT_PROTO_DCCP
347 case IPPROTO_DCCP:
348 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
349 #endif
350 default:
351 break;
352 }
353
354 return true;
355 }
356
357 static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
358 u_int8_t *protonum)
359 {
360 int dataoff = -1;
361 const struct iphdr *iph;
362 struct iphdr _iph;
363
364 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
365 if (!iph)
366 return -1;
367
368 	/* Conntrack defragments packets; we might still see fragments
369 * inside ICMP packets though.
370 */
371 if (iph->frag_off & htons(IP_OFFSET))
372 return -1;
373
374 dataoff = nhoff + (iph->ihl << 2);
375 *protonum = iph->protocol;
376
377 /* Check bogus IP headers */
378 if (dataoff > skb->len) {
379 pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
380 nhoff, iph->ihl << 2, skb->len);
381 return -1;
382 }
383 return dataoff;
384 }
385
386 #if IS_ENABLED(CONFIG_IPV6)
387 static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
388 u8 *protonum)
389 {
390 int protoff = -1;
391 unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
392 __be16 frag_off;
393 u8 nexthdr;
394
395 if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
396 &nexthdr, sizeof(nexthdr)) != 0) {
397 pr_debug("can't get nexthdr\n");
398 return -1;
399 }
400 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
401 /*
402 	 * (protoff == skb->len) means the packet has no data, just
403 	 * IPv6 and possibly extension headers, but it is tracked anyway
404 */
405 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
406 pr_debug("can't find proto in pkt\n");
407 return -1;
408 }
409
410 *protonum = nexthdr;
411 return protoff;
412 }
413 #endif
414
415 static int get_l4proto(const struct sk_buff *skb,
416 unsigned int nhoff, u8 pf, u8 *l4num)
417 {
418 switch (pf) {
419 case NFPROTO_IPV4:
420 return ipv4_get_l4proto(skb, nhoff, l4num);
421 #if IS_ENABLED(CONFIG_IPV6)
422 case NFPROTO_IPV6:
423 return ipv6_get_l4proto(skb, nhoff, l4num);
424 #endif
425 default:
426 *l4num = 0;
427 break;
428 }
429 return -1;
430 }
431
432 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
433 u_int16_t l3num,
434 struct net *net, struct nf_conntrack_tuple *tuple)
435 {
436 u8 protonum;
437 int protoff;
438
439 protoff = get_l4proto(skb, nhoff, l3num, &protonum);
440 if (protoff <= 0)
441 return false;
442
443 return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
444 }
445 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
446
447 bool
448 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
449 const struct nf_conntrack_tuple *orig)
450 {
451 memset(inverse, 0, sizeof(*inverse));
452
453 inverse->src.l3num = orig->src.l3num;
454
455 switch (orig->src.l3num) {
456 case NFPROTO_IPV4:
457 inverse->src.u3.ip = orig->dst.u3.ip;
458 inverse->dst.u3.ip = orig->src.u3.ip;
459 break;
460 case NFPROTO_IPV6:
461 inverse->src.u3.in6 = orig->dst.u3.in6;
462 inverse->dst.u3.in6 = orig->src.u3.in6;
463 break;
464 default:
465 break;
466 }
467
468 inverse->dst.dir = !orig->dst.dir;
469
470 inverse->dst.protonum = orig->dst.protonum;
471
472 switch (orig->dst.protonum) {
473 case IPPROTO_ICMP:
474 return nf_conntrack_invert_icmp_tuple(inverse, orig);
475 #if IS_ENABLED(CONFIG_IPV6)
476 case IPPROTO_ICMPV6:
477 return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
478 #endif
479 }
480
481 inverse->src.u.all = orig->dst.u.all;
482 inverse->dst.u.all = orig->src.u.all;
483 return true;
484 }
485 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
486
487 /* Generate an almost-unique pseudo-id for a given conntrack.
488  *
489  * The id intentionally doesn't re-use any of the seeds used for hash
490  * table location; we assume the id gets exposed to userspace.
491 *
492 * Following nf_conn items do not change throughout lifetime
493 * of the nf_conn:
494 *
495 * 1. nf_conn address
496 * 2. nf_conn->master address (normally NULL)
497 * 3. the associated net namespace
498 * 4. the original direction tuple
499 */
500 u32 nf_ct_get_id(const struct nf_conn *ct)
501 {
502 static __read_mostly siphash_key_t ct_id_seed;
503 unsigned long a, b, c, d;
504
505 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
506
507 a = (unsigned long)ct;
508 b = (unsigned long)ct->master;
509 c = (unsigned long)nf_ct_net(ct);
510 d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
511 sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
512 &ct_id_seed);
513 #ifdef CONFIG_64BIT
514 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
515 #else
516 return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
517 #endif
518 }
519 EXPORT_SYMBOL_GPL(nf_ct_get_id);
520
521 static void
522 clean_from_lists(struct nf_conn *ct)
523 {
524 pr_debug("clean_from_lists(%p)\n", ct);
525 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
526 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
527
528 /* Destroy all pending expectations */
529 nf_ct_remove_expectations(ct);
530 }
531
532 /* must be called with local_bh_disable */
533 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
534 {
535 struct ct_pcpu *pcpu;
536
537 /* add this conntrack to the (per cpu) dying list */
538 ct->cpu = smp_processor_id();
539 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
540
541 spin_lock(&pcpu->lock);
542 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
543 &pcpu->dying);
544 spin_unlock(&pcpu->lock);
545 }
546
547 /* must be called with local_bh_disable */
548 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
549 {
550 struct ct_pcpu *pcpu;
551
552 /* add this conntrack to the (per cpu) unconfirmed list */
553 ct->cpu = smp_processor_id();
554 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
555
556 spin_lock(&pcpu->lock);
557 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
558 &pcpu->unconfirmed);
559 spin_unlock(&pcpu->lock);
560 }
561
562 /* must be called with local_bh_disable */
563 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
564 {
565 struct ct_pcpu *pcpu;
566
567 	/* We overload the first tuple to link into the unconfirmed or dying list. */
568 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
569
570 spin_lock(&pcpu->lock);
571 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
572 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
573 spin_unlock(&pcpu->lock);
574 }
575
576 #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
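/* Round template pointers up to an (NFCT_INFOMASK + 1) boundary so the low
 * bits of skb->_nfct remain free for the ctinfo value when a template is
 * attached to an skb.
 */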
577
578 /* Released via nf_ct_destroy() */
579 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
580 const struct nf_conntrack_zone *zone,
581 gfp_t flags)
582 {
583 struct nf_conn *tmpl, *p;
584
585 if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
586 tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
587 if (!tmpl)
588 return NULL;
589
590 p = tmpl;
591 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
592 if (tmpl != p) {
593 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
594 tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
595 }
596 } else {
597 tmpl = kzalloc(sizeof(*tmpl), flags);
598 if (!tmpl)
599 return NULL;
600 }
601
602 tmpl->status = IPS_TEMPLATE;
603 write_pnet(&tmpl->ct_net, net);
604 nf_ct_zone_add(tmpl, zone);
605 refcount_set(&tmpl->ct_general.use, 1);
606
607 return tmpl;
608 }
609 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
610
611 void nf_ct_tmpl_free(struct nf_conn *tmpl)
612 {
613 nf_ct_ext_destroy(tmpl);
614
615 if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
616 kfree((char *)tmpl - tmpl->proto.tmpl_padto);
617 else
618 kfree(tmpl);
619 }
620 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
621
622 static void destroy_gre_conntrack(struct nf_conn *ct)
623 {
624 #ifdef CONFIG_NF_CT_PROTO_GRE
625 struct nf_conn *master = ct->master;
626
627 if (master)
628 nf_ct_gre_keymap_destroy(master);
629 #endif
630 }
631
632 void nf_ct_destroy(struct nf_conntrack *nfct)
633 {
634 struct nf_conn *ct = (struct nf_conn *)nfct;
635
636 pr_debug("%s(%p)\n", __func__, ct);
637 WARN_ON(refcount_read(&nfct->use) != 0);
638
639 if (unlikely(nf_ct_is_template(ct))) {
640 nf_ct_tmpl_free(ct);
641 return;
642 }
643
644 if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
645 destroy_gre_conntrack(ct);
646
647 local_bh_disable();
648 /* Expectations will have been removed in clean_from_lists,
649 * except TFTP can create an expectation on the first packet,
650 	 * before the connection is in the list, so we need to clean here,
651 * too.
652 */
653 nf_ct_remove_expectations(ct);
654
655 nf_ct_del_from_dying_or_unconfirmed_list(ct);
656
657 local_bh_enable();
658
659 if (ct->master)
660 nf_ct_put(ct->master);
661
662 pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
663 nf_conntrack_free(ct);
664 }
665 EXPORT_SYMBOL(nf_ct_destroy);
666
667 static void nf_ct_delete_from_lists(struct nf_conn *ct)
668 {
669 struct net *net = nf_ct_net(ct);
670 unsigned int hash, reply_hash;
671 unsigned int sequence;
672
673 nf_ct_helper_destroy(ct);
674
675 local_bh_disable();
676 do {
677 sequence = read_seqcount_begin(&nf_conntrack_generation);
678 hash = hash_conntrack(net,
679 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
680 nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
681 reply_hash = hash_conntrack(net,
682 &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
683 nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
684 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
685
686 clean_from_lists(ct);
687 nf_conntrack_double_unlock(hash, reply_hash);
688
689 nf_ct_add_to_dying_list(ct);
690
691 local_bh_enable();
692 }
693
694 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
695 {
696 struct nf_conn_tstamp *tstamp;
697 struct net *net;
698
699 if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
700 return false;
701
702 tstamp = nf_conn_tstamp_find(ct);
703 if (tstamp) {
704 s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
705
706 tstamp->stop = ktime_get_real_ns();
707 if (timeout < 0)
708 tstamp->stop -= jiffies_to_nsecs(-timeout);
709 }
710
711 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
712 portid, report) < 0) {
713 /* destroy event was not delivered. nf_ct_put will
714 * be done by event cache worker on redelivery.
715 */
716 nf_ct_delete_from_lists(ct);
717 nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL);
718 return false;
719 }
720
721 net = nf_ct_net(ct);
722 if (nf_conntrack_ecache_dwork_pending(net))
723 nf_conntrack_ecache_work(net, NFCT_ECACHE_DESTROY_SENT);
724 nf_ct_delete_from_lists(ct);
725 nf_ct_put(ct);
726 return true;
727 }
728 EXPORT_SYMBOL_GPL(nf_ct_delete);
729
730 static inline bool
731 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
732 const struct nf_conntrack_tuple *tuple,
733 const struct nf_conntrack_zone *zone,
734 const struct net *net)
735 {
736 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
737
738 	/* A conntrack can be recreated with an equal tuple,
739 * so we need to check that the conntrack is confirmed
740 */
741 return nf_ct_tuple_equal(tuple, &h->tuple) &&
742 nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
743 nf_ct_is_confirmed(ct) &&
744 net_eq(net, nf_ct_net(ct));
745 }
746
747 static inline bool
748 nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
749 {
750 return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
751 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
752 nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
753 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
754 nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
755 nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
756 net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
757 }
758
759 /* caller must hold rcu readlock and none of the nf_conntrack_locks */
760 static void nf_ct_gc_expired(struct nf_conn *ct)
761 {
762 if (!refcount_inc_not_zero(&ct->ct_general.use))
763 return;
764
765 if (nf_ct_should_gc(ct))
766 nf_ct_kill(ct);
767
768 nf_ct_put(ct);
769 }
770
771 /*
772 * Warning :
773 * - Caller must take a reference on returned object
774 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
775 */
776 static struct nf_conntrack_tuple_hash *
777 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
778 const struct nf_conntrack_tuple *tuple, u32 hash)
779 {
780 struct nf_conntrack_tuple_hash *h;
781 struct hlist_nulls_head *ct_hash;
782 struct hlist_nulls_node *n;
783 unsigned int bucket, hsize;
784
785 begin:
786 nf_conntrack_get_ht(&ct_hash, &hsize);
787 bucket = reciprocal_scale(hash, hsize);
788
789 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
790 struct nf_conn *ct;
791
792 ct = nf_ct_tuplehash_to_ctrack(h);
793 if (nf_ct_is_expired(ct)) {
794 nf_ct_gc_expired(ct);
795 continue;
796 }
797
798 if (nf_ct_key_equal(h, tuple, zone, net))
799 return h;
800 }
801 /*
802 * if the nulls value we got at the end of this lookup is
803 	 * not the expected one, we must restart the lookup.
804 * We probably met an item that was moved to another chain.
805 */
806 if (get_nulls_value(n) != bucket) {
807 NF_CT_STAT_INC_ATOMIC(net, search_restart);
808 goto begin;
809 }
810
811 return NULL;
812 }
813
814 /* Find a connection corresponding to a tuple. */
815 static struct nf_conntrack_tuple_hash *
816 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
817 const struct nf_conntrack_tuple *tuple, u32 hash)
818 {
819 struct nf_conntrack_tuple_hash *h;
820 struct nf_conn *ct;
821
822 rcu_read_lock();
823
824 h = ____nf_conntrack_find(net, zone, tuple, hash);
825 if (h) {
826 /* We have a candidate that matches the tuple we're interested
827 * in, try to obtain a reference and re-check tuple
828 */
829 ct = nf_ct_tuplehash_to_ctrack(h);
830 if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
831 if (likely(nf_ct_key_equal(h, tuple, zone, net)))
832 goto found;
833
834 /* TYPESAFE_BY_RCU recycled the candidate */
835 nf_ct_put(ct);
836 }
837
838 h = NULL;
839 }
840 found:
841 rcu_read_unlock();
842
843 return h;
844 }
845
846 struct nf_conntrack_tuple_hash *
847 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
848 const struct nf_conntrack_tuple *tuple)
849 {
850 unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
851 struct nf_conntrack_tuple_hash *thash;
852
853 thash = __nf_conntrack_find_get(net, zone, tuple,
854 hash_conntrack_raw(tuple, zone_id, net));
855
856 if (thash)
857 return thash;
858
859 rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
860 if (rid != zone_id)
861 return __nf_conntrack_find_get(net, zone, tuple,
862 hash_conntrack_raw(tuple, rid, net));
863 return thash;
864 }
865 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
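/* Usage sketch (not part of this file): a caller that has built a tuple,
 * e.g. via nf_ct_get_tuplepr(), looks it up and must drop the reference the
 * lookup took:
 *
 *	h = nf_conntrack_find_get(net, zone, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		... inspect ct ...
 *		nf_ct_put(ct);
 *	}
 */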
866
867 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
868 unsigned int hash,
869 unsigned int reply_hash)
870 {
871 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
872 &nf_conntrack_hash[hash]);
873 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
874 &nf_conntrack_hash[reply_hash]);
875 }
876
877 int
878 nf_conntrack_hash_check_insert(struct nf_conn *ct)
879 {
880 const struct nf_conntrack_zone *zone;
881 struct net *net = nf_ct_net(ct);
882 unsigned int hash, reply_hash;
883 struct nf_conntrack_tuple_hash *h;
884 struct hlist_nulls_node *n;
885 unsigned int max_chainlen;
886 unsigned int chainlen = 0;
887 unsigned int sequence;
888 int err = -EEXIST;
889
890 zone = nf_ct_zone(ct);
891
892 local_bh_disable();
893 do {
894 sequence = read_seqcount_begin(&nf_conntrack_generation);
895 hash = hash_conntrack(net,
896 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
897 nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
898 reply_hash = hash_conntrack(net,
899 &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
900 nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
901 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
902
903 max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
904
905 /* See if there's one in the list already, including reverse */
906 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
907 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
908 zone, net))
909 goto out;
910
911 if (chainlen++ > max_chainlen)
912 goto chaintoolong;
913 }
914
915 chainlen = 0;
916
917 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
918 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
919 zone, net))
920 goto out;
921 if (chainlen++ > max_chainlen)
922 goto chaintoolong;
923 }
924
925 smp_wmb();
926 /* The caller holds a reference to this object */
927 refcount_set(&ct->ct_general.use, 2);
928 __nf_conntrack_hash_insert(ct, hash, reply_hash);
929 nf_conntrack_double_unlock(hash, reply_hash);
930 NF_CT_STAT_INC(net, insert);
931 local_bh_enable();
932 return 0;
933 chaintoolong:
934 NF_CT_STAT_INC(net, chaintoolong);
935 err = -ENOSPC;
936 out:
937 nf_conntrack_double_unlock(hash, reply_hash);
938 local_bh_enable();
939 return err;
940 }
941 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
942
943 void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
944 unsigned int bytes)
945 {
946 struct nf_conn_acct *acct;
947
948 acct = nf_conn_acct_find(ct);
949 if (acct) {
950 struct nf_conn_counter *counter = acct->counter;
951
952 atomic64_add(packets, &counter[dir].packets);
953 atomic64_add(bytes, &counter[dir].bytes);
954 }
955 }
956 EXPORT_SYMBOL_GPL(nf_ct_acct_add);
957
958 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
959 const struct nf_conn *loser_ct)
960 {
961 struct nf_conn_acct *acct;
962
963 acct = nf_conn_acct_find(loser_ct);
964 if (acct) {
965 struct nf_conn_counter *counter = acct->counter;
966 unsigned int bytes;
967
968 /* u32 should be fine since we must have seen one packet. */
969 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
970 nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
971 }
972 }
973
974 static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
975 {
976 struct nf_conn_tstamp *tstamp;
977
978 refcount_inc(&ct->ct_general.use);
979 ct->status |= IPS_CONFIRMED;
980
981 /* set conntrack timestamp, if enabled. */
982 tstamp = nf_conn_tstamp_find(ct);
983 if (tstamp)
984 tstamp->start = ktime_get_real_ns();
985 }
986
987 /* caller must hold locks to prevent concurrent changes */
988 static int __nf_ct_resolve_clash(struct sk_buff *skb,
989 struct nf_conntrack_tuple_hash *h)
990 {
991 /* This is the conntrack entry already in hashes that won race. */
992 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
993 enum ip_conntrack_info ctinfo;
994 struct nf_conn *loser_ct;
995
996 loser_ct = nf_ct_get(skb, &ctinfo);
997
998 if (nf_ct_is_dying(ct))
999 return NF_DROP;
1000
1001 if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
1002 nf_ct_match(ct, loser_ct)) {
1003 struct net *net = nf_ct_net(ct);
1004
1005 nf_conntrack_get(&ct->ct_general);
1006
1007 nf_ct_acct_merge(ct, ctinfo, loser_ct);
1008 nf_ct_add_to_dying_list(loser_ct);
1009 nf_ct_put(loser_ct);
1010 nf_ct_set(skb, ct, ctinfo);
1011
1012 NF_CT_STAT_INC(net, clash_resolve);
1013 return NF_ACCEPT;
1014 }
1015
1016 return NF_DROP;
1017 }
1018
1019 /**
1020 * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
1021 *
1022 * @skb: skb that causes the collision
1023 * @repl_idx: hash slot for reply direction
1024 *
1025 * Called when origin or reply direction had a clash.
1026 * The skb can be handled without packet drop provided the reply direction
1027  * is unique or the existing entry has the identical tuple in both
1028 * directions.
1029 *
1030 * Caller must hold conntrack table locks to prevent concurrent updates.
1031 *
1032 * Returns NF_DROP if the clash could not be handled.
1033 */
1034 static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
1035 {
1036 struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
1037 const struct nf_conntrack_zone *zone;
1038 struct nf_conntrack_tuple_hash *h;
1039 struct hlist_nulls_node *n;
1040 struct net *net;
1041
1042 zone = nf_ct_zone(loser_ct);
1043 net = nf_ct_net(loser_ct);
1044
1045 /* Reply direction must never result in a clash, unless both origin
1046 * and reply tuples are identical.
1047 */
1048 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
1049 if (nf_ct_key_equal(h,
1050 &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
1051 zone, net))
1052 return __nf_ct_resolve_clash(skb, h);
1053 }
1054
1055 /* We want the clashing entry to go away real soon: 1 second timeout. */
1056 WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
1057
1058 /* IPS_NAT_CLASH removes the entry automatically on the first
1059 * reply. Also prevents UDP tracker from moving the entry to
1060 * ASSURED state, i.e. the entry can always be evicted under
1061 * pressure.
1062 */
1063 loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
1064
1065 __nf_conntrack_insert_prepare(loser_ct);
1066
1067 /* fake add for ORIGINAL dir: we want lookups to only find the entry
1068 * already in the table. This also hides the clashing entry from
1069 * ctnetlink iteration, i.e. conntrack -L won't show them.
1070 */
1071 hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
1072
1073 hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
1074 &nf_conntrack_hash[repl_idx]);
1075
1076 NF_CT_STAT_INC(net, clash_resolve);
1077 return NF_ACCEPT;
1078 }
1079
1080 /**
1081 * nf_ct_resolve_clash - attempt to handle clash without packet drop
1082 *
1083 * @skb: skb that causes the clash
1084 * @h: tuplehash of the clashing entry already in table
1085 * @reply_hash: hash slot for reply direction
1086 *
1087 * A conntrack entry can be inserted to the connection tracking table
1088 * if there is no existing entry with an identical tuple.
1089 *
1090  * If there is one, @skb (and the associated, unconfirmed conntrack) has
1091 * to be dropped. In case @skb is retransmitted, next conntrack lookup
1092 * will find the already-existing entry.
1093 *
1094 * The major problem with such packet drop is the extra delay added by
1095 * the packet loss -- it will take some time for a retransmit to occur
1096 * (or the sender to time out when waiting for a reply).
1097 *
1098 * This function attempts to handle the situation without packet drop.
1099 *
1100 * If @skb has no NAT transformation or if the colliding entries are
1101 * exactly the same, only the to-be-confirmed conntrack entry is discarded
1102 * and @skb is associated with the conntrack entry already in the table.
1103 *
1104 * Failing that, the new, unconfirmed conntrack is still added to the table
1105 * provided that the collision only occurs in the ORIGINAL direction.
1106 * The new entry will be added only in the non-clashing REPLY direction,
1107 * so packets in the ORIGINAL direction will continue to match the existing
1108 * entry. The new entry will also have a fixed timeout so it expires --
1109 * due to the collision, it will only see reply traffic.
1110 *
1111 * Returns NF_DROP if the clash could not be resolved.
1112 */
1113 static __cold noinline int
1114 nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
1115 u32 reply_hash)
1116 {
1117 /* This is the conntrack entry already in hashes that won race. */
1118 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
1119 const struct nf_conntrack_l4proto *l4proto;
1120 enum ip_conntrack_info ctinfo;
1121 struct nf_conn *loser_ct;
1122 struct net *net;
1123 int ret;
1124
1125 loser_ct = nf_ct_get(skb, &ctinfo);
1126 net = nf_ct_net(loser_ct);
1127
1128 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
1129 if (!l4proto->allow_clash)
1130 goto drop;
1131
1132 ret = __nf_ct_resolve_clash(skb, h);
1133 if (ret == NF_ACCEPT)
1134 return ret;
1135
1136 ret = nf_ct_resolve_clash_harder(skb, reply_hash);
1137 if (ret == NF_ACCEPT)
1138 return ret;
1139
1140 drop:
1141 nf_ct_add_to_dying_list(loser_ct);
1142 NF_CT_STAT_INC(net, drop);
1143 NF_CT_STAT_INC(net, insert_failed);
1144 return NF_DROP;
1145 }
1146
1147 /* Confirm a connection given skb; places it in hash table */
1148 int
1149 __nf_conntrack_confirm(struct sk_buff *skb)
1150 {
1151 unsigned int chainlen = 0, sequence, max_chainlen;
1152 const struct nf_conntrack_zone *zone;
1153 unsigned int hash, reply_hash;
1154 struct nf_conntrack_tuple_hash *h;
1155 struct nf_conn *ct;
1156 struct nf_conn_help *help;
1157 struct hlist_nulls_node *n;
1158 enum ip_conntrack_info ctinfo;
1159 struct net *net;
1160 int ret = NF_DROP;
1161
1162 ct = nf_ct_get(skb, &ctinfo);
1163 net = nf_ct_net(ct);
1164
1165 	/* ipt_REJECT uses nf_conntrack_attach to attach related
1166 	   ICMP/TCP RST packets in the other direction. The actual packet
1167 	   which created the connection will be IP_CT_NEW or, for an
1168 	   expected connection, IP_CT_RELATED. */
1169 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
1170 return NF_ACCEPT;
1171
1172 zone = nf_ct_zone(ct);
1173 local_bh_disable();
1174
1175 do {
1176 sequence = read_seqcount_begin(&nf_conntrack_generation);
1177 /* reuse the hash saved before */
1178 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
1179 hash = scale_hash(hash);
1180 reply_hash = hash_conntrack(net,
1181 &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
1182 nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
1183 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
1184
1185 /* We're not in hash table, and we refuse to set up related
1186 * connections for unconfirmed conns. But packet copies and
1187 * REJECT will give spurious warnings here.
1188 */
1189
1190 /* Another skb with the same unconfirmed conntrack may
1191 	 * win the race. This may happen with bridge (br_flood) or
1192 	 * broadcast/multicast packets, which are skb_clone()d while the
1193 	 * conntrack is still unconfirmed.
1194 */
1195 if (unlikely(nf_ct_is_confirmed(ct))) {
1196 WARN_ON_ONCE(1);
1197 nf_conntrack_double_unlock(hash, reply_hash);
1198 local_bh_enable();
1199 return NF_DROP;
1200 }
1201
1202 pr_debug("Confirming conntrack %p\n", ct);
1203 /* We have to check the DYING flag after unlink to prevent
1204 * a race against nf_ct_get_next_corpse() possibly called from
1205 * user context, else we insert an already 'dead' hash, blocking
1206 * further use of that particular connection -JM.
1207 */
1208 nf_ct_del_from_dying_or_unconfirmed_list(ct);
1209
1210 if (unlikely(nf_ct_is_dying(ct))) {
1211 nf_ct_add_to_dying_list(ct);
1212 NF_CT_STAT_INC(net, insert_failed);
1213 goto dying;
1214 }
1215
1216 max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
1217 /* See if there's one in the list already, including reverse:
1218 NAT could have grabbed it without realizing, since we're
1219 	   not in the hash. If there is, we lost the race. */
1220 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
1221 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1222 zone, net))
1223 goto out;
1224 if (chainlen++ > max_chainlen)
1225 goto chaintoolong;
1226 }
1227
1228 chainlen = 0;
1229 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
1230 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
1231 zone, net))
1232 goto out;
1233 if (chainlen++ > max_chainlen) {
1234 chaintoolong:
1235 nf_ct_add_to_dying_list(ct);
1236 NF_CT_STAT_INC(net, chaintoolong);
1237 NF_CT_STAT_INC(net, insert_failed);
1238 ret = NF_DROP;
1239 goto dying;
1240 }
1241 }
1242
1243 /* Timer relative to confirmation time, not original
1244 setting time, otherwise we'd get timer wrap in
1245 weird delay cases. */
1246 ct->timeout += nfct_time_stamp;
1247
1248 __nf_conntrack_insert_prepare(ct);
1249
1250 /* Since the lookup is lockless, hash insertion must be done after
1251 * starting the timer and setting the CONFIRMED bit. The RCU barriers
1252 * guarantee that no other CPU can find the conntrack before the above
1253 * stores are visible.
1254 */
1255 __nf_conntrack_hash_insert(ct, hash, reply_hash);
1256 nf_conntrack_double_unlock(hash, reply_hash);
1257 local_bh_enable();
1258
1259 help = nfct_help(ct);
1260 if (help && help->helper)
1261 nf_conntrack_event_cache(IPCT_HELPER, ct);
1262
1263 nf_conntrack_event_cache(master_ct(ct) ?
1264 IPCT_RELATED : IPCT_NEW, ct);
1265 return NF_ACCEPT;
1266
1267 out:
1268 ret = nf_ct_resolve_clash(skb, h, reply_hash);
1269 dying:
1270 nf_conntrack_double_unlock(hash, reply_hash);
1271 local_bh_enable();
1272 return ret;
1273 }
1274 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
1275
1276 /* Returns true if a connection corresponds to the tuple (required
1277 for NAT). */
1278 int
1279 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1280 const struct nf_conn *ignored_conntrack)
1281 {
1282 struct net *net = nf_ct_net(ignored_conntrack);
1283 const struct nf_conntrack_zone *zone;
1284 struct nf_conntrack_tuple_hash *h;
1285 struct hlist_nulls_head *ct_hash;
1286 unsigned int hash, hsize;
1287 struct hlist_nulls_node *n;
1288 struct nf_conn *ct;
1289
1290 zone = nf_ct_zone(ignored_conntrack);
1291
1292 rcu_read_lock();
1293 begin:
1294 nf_conntrack_get_ht(&ct_hash, &hsize);
1295 hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
1296
1297 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
1298 ct = nf_ct_tuplehash_to_ctrack(h);
1299
1300 if (ct == ignored_conntrack)
1301 continue;
1302
1303 if (nf_ct_is_expired(ct)) {
1304 nf_ct_gc_expired(ct);
1305 continue;
1306 }
1307
1308 if (nf_ct_key_equal(h, tuple, zone, net)) {
1309 /* Tuple is taken already, so caller will need to find
1310 * a new source port to use.
1311 *
1312 * Only exception:
1313 * If the *original tuples* are identical, then both
1314 * conntracks refer to the same flow.
1315 * This is a rare situation, it can occur e.g. when
1316 * more than one UDP packet is sent from same socket
1317 * in different threads.
1318 *
1319 * Let nf_ct_resolve_clash() deal with this later.
1320 */
1321 if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1322 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
1323 nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
1324 continue;
1325
1326 NF_CT_STAT_INC_ATOMIC(net, found);
1327 rcu_read_unlock();
1328 return 1;
1329 }
1330 }
1331
1332 if (get_nulls_value(n) != hash) {
1333 NF_CT_STAT_INC_ATOMIC(net, search_restart);
1334 goto begin;
1335 }
1336
1337 rcu_read_unlock();
1338
1339 return 0;
1340 }
1341 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
1342
1343 #define NF_CT_EVICTION_RANGE 8
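/* early_drop() below scans at most NF_CT_EVICTION_RANGE consecutive hash
 * buckets, starting from the bucket the new entry would land in, looking for
 * a non-assured conntrack it can evict.
 */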
1344
1345 /* There's a small race here where we may free a just-assured
1346 connection. Too bad: we're in trouble anyway. */
1347 static unsigned int early_drop_list(struct net *net,
1348 struct hlist_nulls_head *head)
1349 {
1350 struct nf_conntrack_tuple_hash *h;
1351 struct hlist_nulls_node *n;
1352 unsigned int drops = 0;
1353 struct nf_conn *tmp;
1354
1355 hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
1356 tmp = nf_ct_tuplehash_to_ctrack(h);
1357
1358 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
1359 continue;
1360
1361 if (nf_ct_is_expired(tmp)) {
1362 nf_ct_gc_expired(tmp);
1363 continue;
1364 }
1365
1366 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
1367 !net_eq(nf_ct_net(tmp), net) ||
1368 nf_ct_is_dying(tmp))
1369 continue;
1370
1371 if (!refcount_inc_not_zero(&tmp->ct_general.use))
1372 continue;
1373
1374 /* kill only if still in same netns -- might have moved due to
1375 * SLAB_TYPESAFE_BY_RCU rules.
1376 *
1377 * We steal the timer reference. If that fails timer has
1378 * already fired or someone else deleted it. Just drop ref
1379 * and move to next entry.
1380 */
1381 if (net_eq(nf_ct_net(tmp), net) &&
1382 nf_ct_is_confirmed(tmp) &&
1383 nf_ct_delete(tmp, 0, 0))
1384 drops++;
1385
1386 nf_ct_put(tmp);
1387 }
1388
1389 return drops;
1390 }
1391
1392 static noinline int early_drop(struct net *net, unsigned int hash)
1393 {
1394 unsigned int i, bucket;
1395
1396 for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
1397 struct hlist_nulls_head *ct_hash;
1398 unsigned int hsize, drops;
1399
1400 rcu_read_lock();
1401 nf_conntrack_get_ht(&ct_hash, &hsize);
1402 if (!i)
1403 bucket = reciprocal_scale(hash, hsize);
1404 else
1405 bucket = (bucket + 1) % hsize;
1406
1407 drops = early_drop_list(net, &ct_hash[bucket]);
1408 rcu_read_unlock();
1409
1410 if (drops) {
1411 NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
1412 return true;
1413 }
1414 }
1415
1416 return false;
1417 }
1418
1419 static bool gc_worker_skip_ct(const struct nf_conn *ct)
1420 {
1421 return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
1422 }
1423
1424 static bool gc_worker_can_early_drop(const struct nf_conn *ct)
1425 {
1426 const struct nf_conntrack_l4proto *l4proto;
1427
1428 if (!test_bit(IPS_ASSURED_BIT, &ct->status))
1429 return true;
1430
1431 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
1432 if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
1433 return true;
1434
1435 return false;
1436 }
1437
1438 static void gc_worker(struct work_struct *work)
1439 {
1440 unsigned int i, hashsz, nf_conntrack_max95 = 0;
1441 u32 end_time, start_time = nfct_time_stamp;
1442 struct conntrack_gc_work *gc_work;
1443 unsigned int expired_count = 0;
1444 unsigned long next_run;
1445 s32 delta_time;
1446 long count;
1447
1448 gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
1449
1450 i = gc_work->next_bucket;
1451 if (gc_work->early_drop)
1452 nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
1453
1454 if (i == 0) {
1455 gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
1456 gc_work->count = GC_SCAN_INITIAL_COUNT;
1457 gc_work->start_time = start_time;
1458 }
1459
1460 next_run = gc_work->avg_timeout;
1461 count = gc_work->count;
1462
1463 end_time = start_time + GC_SCAN_MAX_DURATION;
1464
1465 do {
1466 struct nf_conntrack_tuple_hash *h;
1467 struct hlist_nulls_head *ct_hash;
1468 struct hlist_nulls_node *n;
1469 struct nf_conn *tmp;
1470
1471 rcu_read_lock();
1472
1473 nf_conntrack_get_ht(&ct_hash, &hashsz);
1474 if (i >= hashsz) {
1475 rcu_read_unlock();
1476 break;
1477 }
1478
1479 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
1480 struct nf_conntrack_net *cnet;
1481 struct net *net;
1482 long expires;
1483
1484 tmp = nf_ct_tuplehash_to_ctrack(h);
1485
1486 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
1487 nf_ct_offload_timeout(tmp);
1488 continue;
1489 }
1490
1491 if (expired_count > GC_SCAN_EXPIRED_MAX) {
1492 rcu_read_unlock();
1493
1494 gc_work->next_bucket = i;
1495 gc_work->avg_timeout = next_run;
1496 gc_work->count = count;
1497
1498 delta_time = nfct_time_stamp - gc_work->start_time;
1499
1500 /* re-sched immediately if total cycle time is exceeded */
1501 next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
1502 goto early_exit;
1503 }
1504
1505 if (nf_ct_is_expired(tmp)) {
1506 nf_ct_gc_expired(tmp);
1507 expired_count++;
1508 continue;
1509 }
1510
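			/* Fold this entry's remaining lifetime into a running
			 * average: next_run += (expires - next_run) / ++count.
			 * The averaged value decides how long to sleep before
			 * the next scan.
			 */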
1511 expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
1512 expires = (expires - (long)next_run) / ++count;
1513 next_run += expires;
1514
1515 if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
1516 continue;
1517
1518 net = nf_ct_net(tmp);
1519 cnet = nf_ct_pernet(net);
1520 if (atomic_read(&cnet->count) < nf_conntrack_max95)
1521 continue;
1522
1523 /* need to take reference to avoid possible races */
1524 if (!refcount_inc_not_zero(&tmp->ct_general.use))
1525 continue;
1526
1527 if (gc_worker_skip_ct(tmp)) {
1528 nf_ct_put(tmp);
1529 continue;
1530 }
1531
1532 if (gc_worker_can_early_drop(tmp)) {
1533 nf_ct_kill(tmp);
1534 expired_count++;
1535 }
1536
1537 nf_ct_put(tmp);
1538 }
1539
1540 /* could check get_nulls_value() here and restart if ct
1541 * was moved to another chain. But given gc is best-effort
1542 * we will just continue with next hash slot.
1543 */
1544 rcu_read_unlock();
1545 cond_resched();
1546 i++;
1547
1548 delta_time = nfct_time_stamp - end_time;
1549 if (delta_time > 0 && i < hashsz) {
1550 gc_work->avg_timeout = next_run;
1551 gc_work->count = count;
1552 gc_work->next_bucket = i;
1553 next_run = 0;
1554 goto early_exit;
1555 }
1556 } while (i < hashsz);
1557
1558 gc_work->next_bucket = 0;
1559
1560 next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
1561
1562 delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
1563 if (next_run > (unsigned long)delta_time)
1564 next_run -= delta_time;
1565 else
1566 next_run = 1;
1567
1568 early_exit:
1569 if (gc_work->exiting)
1570 return;
1571
1572 if (next_run)
1573 gc_work->early_drop = false;
1574
1575 queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
1576 }
1577
1578 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1579 {
1580 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1581 gc_work->exiting = false;
1582 }
1583
1584 static struct nf_conn *
1585 __nf_conntrack_alloc(struct net *net,
1586 const struct nf_conntrack_zone *zone,
1587 const struct nf_conntrack_tuple *orig,
1588 const struct nf_conntrack_tuple *repl,
1589 gfp_t gfp, u32 hash)
1590 {
1591 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
1592 unsigned int ct_count;
1593 struct nf_conn *ct;
1594
1595 /* We don't want any race condition at early drop stage */
1596 ct_count = atomic_inc_return(&cnet->count);
1597
1598 if (nf_conntrack_max && unlikely(ct_count > nf_conntrack_max)) {
1599 if (!early_drop(net, hash)) {
1600 if (!conntrack_gc_work.early_drop)
1601 conntrack_gc_work.early_drop = true;
1602 atomic_dec(&cnet->count);
1603 net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1604 return ERR_PTR(-ENOMEM);
1605 }
1606 }
1607
1608 /*
1609 * Do not use kmem_cache_zalloc(), as this cache uses
1610 * SLAB_TYPESAFE_BY_RCU.
1611 */
1612 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1613 if (ct == NULL)
1614 goto out;
1615
1616 spin_lock_init(&ct->lock);
1617 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1618 ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1619 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1620 /* save hash for reusing when confirming */
1621 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
1622 ct->status = 0;
1623 WRITE_ONCE(ct->timeout, 0);
1624 write_pnet(&ct->ct_net, net);
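	/* Everything from ->__nfct_init_offset up to (but not including)
	 * ->proto is cleared in one memset below; the members laid out before
	 * that marker are initialized individually.
	 */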
1625 memset(&ct->__nfct_init_offset, 0,
1626 offsetof(struct nf_conn, proto) -
1627 offsetof(struct nf_conn, __nfct_init_offset));
1628
1629 nf_ct_zone_add(ct, zone);
1630
1631 trace_android_rvh_nf_conn_alloc(ct);
1632
1633 /* Because we use RCU lookups, we set ct_general.use to zero before
1634 * this is inserted in any list.
1635 */
1636 refcount_set(&ct->ct_general.use, 0);
1637 return ct;
1638 out:
1639 atomic_dec(&cnet->count);
1640 return ERR_PTR(-ENOMEM);
1641 }
1642
1643 struct nf_conn *nf_conntrack_alloc(struct net *net,
1644 const struct nf_conntrack_zone *zone,
1645 const struct nf_conntrack_tuple *orig,
1646 const struct nf_conntrack_tuple *repl,
1647 gfp_t gfp)
1648 {
1649 return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1650 }
1651 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1652
1653 void nf_conntrack_free(struct nf_conn *ct)
1654 {
1655 struct net *net = nf_ct_net(ct);
1656 struct nf_conntrack_net *cnet;
1657
1658 /* A freed object has refcnt == 0, that's
1659 * the golden rule for SLAB_TYPESAFE_BY_RCU
1660 */
1661 WARN_ON(refcount_read(&ct->ct_general.use) != 0);
1662
1663 nf_ct_ext_destroy(ct);
1664 kmem_cache_free(nf_conntrack_cachep, ct);
1665 cnet = nf_ct_pernet(net);
1666
1667 smp_mb__before_atomic();
1668 trace_android_rvh_nf_conn_free(ct);
1669 atomic_dec(&cnet->count);
1670 }
1671 EXPORT_SYMBOL_GPL(nf_conntrack_free);
1672
1673
1674 /* Allocate a new conntrack: we return -ENOMEM if classification
1675 failed due to stress. Otherwise it really is unclassifiable. */
1676 static noinline struct nf_conntrack_tuple_hash *
1677 init_conntrack(struct net *net, struct nf_conn *tmpl,
1678 const struct nf_conntrack_tuple *tuple,
1679 struct sk_buff *skb,
1680 unsigned int dataoff, u32 hash)
1681 {
1682 struct nf_conn *ct;
1683 struct nf_conn_help *help;
1684 struct nf_conntrack_tuple repl_tuple;
1685 struct nf_conntrack_ecache *ecache;
1686 struct nf_conntrack_expect *exp = NULL;
1687 const struct nf_conntrack_zone *zone;
1688 struct nf_conn_timeout *timeout_ext;
1689 struct nf_conntrack_zone tmp;
1690 struct nf_conntrack_net *cnet;
1691
1692 if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
1693 pr_debug("Can't invert tuple.\n");
1694 return NULL;
1695 }
1696
1697 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1698 ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1699 hash);
1700 if (IS_ERR(ct))
1701 return (struct nf_conntrack_tuple_hash *)ct;
1702
1703 if (!nf_ct_add_synproxy(ct, tmpl)) {
1704 nf_conntrack_free(ct);
1705 return ERR_PTR(-ENOMEM);
1706 }
1707
1708 timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1709
1710 if (timeout_ext)
1711 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1712 GFP_ATOMIC);
1713
1714 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1715 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1716 nf_ct_labels_ext_add(ct);
1717
1718 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1719 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1720 ecache ? ecache->expmask : 0,
1721 GFP_ATOMIC);
1722
1723 local_bh_disable();
1724 cnet = nf_ct_pernet(net);
1725 if (cnet->expect_count) {
1726 spin_lock(&nf_conntrack_expect_lock);
1727 exp = nf_ct_find_expectation(net, zone, tuple);
1728 if (exp) {
1729 pr_debug("expectation arrives ct=%p exp=%p\n",
1730 ct, exp);
1731 /* Welcome, Mr. Bond. We've been expecting you... */
1732 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1733 /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1734 ct->master = exp->master;
1735 if (exp->helper) {
1736 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1737 if (help)
1738 rcu_assign_pointer(help->helper, exp->helper);
1739 }
1740
1741 #ifdef CONFIG_NF_CONNTRACK_MARK
1742 ct->mark = READ_ONCE(exp->master->mark);
1743 #endif
1744 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1745 ct->secmark = exp->master->secmark;
1746 #endif
1747 NF_CT_STAT_INC(net, expect_new);
1748 }
1749 spin_unlock(&nf_conntrack_expect_lock);
1750 }
1751 if (!exp)
1752 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1753
1754 /* Set refcount to 1 and insert it into the unconfirmed list. */
1755 refcount_set(&ct->ct_general.use, 1);
1756 nf_ct_add_to_unconfirmed_list(ct);
1757
1758 local_bh_enable();
1759
1760 if (exp) {
1761 if (exp->expectfn)
1762 exp->expectfn(ct, exp);
1763 nf_ct_expect_put(exp);
1764 }
1765
1766 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1767 }
1768
1769 /* On success, returns 0, sets skb->_nfct | ctinfo */
1770 static int
1771 resolve_normal_ct(struct nf_conn *tmpl,
1772 struct sk_buff *skb,
1773 unsigned int dataoff,
1774 u_int8_t protonum,
1775 const struct nf_hook_state *state)
1776 {
1777 const struct nf_conntrack_zone *zone;
1778 struct nf_conntrack_tuple tuple;
1779 struct nf_conntrack_tuple_hash *h;
1780 enum ip_conntrack_info ctinfo;
1781 struct nf_conntrack_zone tmp;
1782 u32 hash, zone_id, rid;
1783 struct nf_conn *ct;
1784
1785 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1786 dataoff, state->pf, protonum, state->net,
1787 &tuple)) {
1788 pr_debug("Can't get tuple\n");
1789 return 0;
1790 }
1791
1792 /* look for tuple match */
1793 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1794
1795 zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
1796 hash = hash_conntrack_raw(&tuple, zone_id, state->net);
1797 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
1798
1799 if (!h) {
1800 rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
1801 if (zone_id != rid) {
1802 u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
1803
1804 h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
1805 }
1806 }
1807
1808 if (!h) {
1809 h = init_conntrack(state->net, tmpl, &tuple,
1810 skb, dataoff, hash);
1811 if (!h)
1812 return 0;
1813 if (IS_ERR(h))
1814 return PTR_ERR(h);
1815 }
1816 ct = nf_ct_tuplehash_to_ctrack(h);
1817
1818 /* It exists; we have (non-exclusive) reference. */
1819 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1820 ctinfo = IP_CT_ESTABLISHED_REPLY;
1821 } else {
1822 /* Once we've had two way comms, always ESTABLISHED. */
1823 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1824 pr_debug("normal packet for %p\n", ct);
1825 ctinfo = IP_CT_ESTABLISHED;
1826 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1827 pr_debug("related packet for %p\n", ct);
1828 ctinfo = IP_CT_RELATED;
1829 } else {
1830 pr_debug("new packet for %p\n", ct);
1831 ctinfo = IP_CT_NEW;
1832 }
1833 }
1834 nf_ct_set(skb, ct, ctinfo);
1835 return 0;
1836 }
1837
1838 /*
1839 * icmp packets need special treatment to handle error messages that are
1840 * related to a connection.
1841 *
1842 * Callers need to check if skb has a conntrack assigned when this
1843 * helper returns; in such case skb belongs to an already known connection.
1844 */
1845 static unsigned int __cold
1846 nf_conntrack_handle_icmp(struct nf_conn *tmpl,
1847 struct sk_buff *skb,
1848 unsigned int dataoff,
1849 u8 protonum,
1850 const struct nf_hook_state *state)
1851 {
1852 int ret;
1853
1854 if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
1855 ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
1856 #if IS_ENABLED(CONFIG_IPV6)
1857 else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
1858 ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
1859 #endif
1860 else
1861 return NF_ACCEPT;
1862
1863 if (ret <= 0)
1864 NF_CT_STAT_INC_ATOMIC(state->net, error);
1865
1866 return ret;
1867 }
1868
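/* Fallback handler for protocols without a dedicated tracker: refresh the
 * timeout (per-conntrack timeout policy if present, otherwise the per-netns
 * generic timeout) and accept the packet.
 */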
1869 static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
1870 enum ip_conntrack_info ctinfo)
1871 {
1872 const unsigned int *timeout = nf_ct_timeout_lookup(ct);
1873
1874 if (!timeout)
1875 timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
1876
1877 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
1878 return NF_ACCEPT;
1879 }
1880
1881 /* Returns verdict for packet, or -1 for invalid. */
1882 static int nf_conntrack_handle_packet(struct nf_conn *ct,
1883 struct sk_buff *skb,
1884 unsigned int dataoff,
1885 enum ip_conntrack_info ctinfo,
1886 const struct nf_hook_state *state)
1887 {
1888 switch (nf_ct_protonum(ct)) {
1889 case IPPROTO_TCP:
1890 return nf_conntrack_tcp_packet(ct, skb, dataoff,
1891 ctinfo, state);
1892 case IPPROTO_UDP:
1893 return nf_conntrack_udp_packet(ct, skb, dataoff,
1894 ctinfo, state);
1895 case IPPROTO_ICMP:
1896 return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
1897 #if IS_ENABLED(CONFIG_IPV6)
1898 case IPPROTO_ICMPV6:
1899 return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
1900 #endif
1901 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
1902 case IPPROTO_UDPLITE:
1903 return nf_conntrack_udplite_packet(ct, skb, dataoff,
1904 ctinfo, state);
1905 #endif
1906 #ifdef CONFIG_NF_CT_PROTO_SCTP
1907 case IPPROTO_SCTP:
1908 return nf_conntrack_sctp_packet(ct, skb, dataoff,
1909 ctinfo, state);
1910 #endif
1911 #ifdef CONFIG_NF_CT_PROTO_DCCP
1912 case IPPROTO_DCCP:
1913 return nf_conntrack_dccp_packet(ct, skb, dataoff,
1914 ctinfo, state);
1915 #endif
1916 #ifdef CONFIG_NF_CT_PROTO_GRE
1917 case IPPROTO_GRE:
1918 return nf_conntrack_gre_packet(ct, skb, dataoff,
1919 ctinfo, state);
1920 #endif
1921 }
1922
1923 return generic_packet(ct, skb, ctinfo);
1924 }
1925
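/* Main conntrack hook: look up (or create) the conntrack for this packet,
 * run the per-protocol state machine and return a netfilter verdict.
 * ICMP/ICMPv6 errors are dispatched to the ICMP handler first so that
 * errors get associated with the connection that triggered them.
 */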
1926 unsigned int
1927 nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
1928 {
1929 enum ip_conntrack_info ctinfo;
1930 struct nf_conn *ct, *tmpl;
1931 u_int8_t protonum;
1932 int dataoff, ret;
1933
1934 tmpl = nf_ct_get(skb, &ctinfo);
1935 if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1936 /* Previously seen (loopback or untracked)? Ignore. */
1937 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1938 ctinfo == IP_CT_UNTRACKED)
1939 return NF_ACCEPT;
1940 skb->_nfct = 0;
1941 }
1942
1943 /* rcu_read_lock()ed by nf_hook_thresh */
1944 dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
1945 if (dataoff <= 0) {
1946 pr_debug("not prepared to track yet or error occurred\n");
1947 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1948 ret = NF_ACCEPT;
1949 goto out;
1950 }
1951
1952 if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1953 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1954 protonum, state);
1955 if (ret <= 0) {
1956 ret = -ret;
1957 goto out;
1958 }
1959 /* ICMP[v6] protocol trackers may assign one conntrack. */
1960 if (skb->_nfct)
1961 goto out;
1962 }
1963 repeat:
1964 ret = resolve_normal_ct(tmpl, skb, dataoff,
1965 protonum, state);
1966 if (ret < 0) {
1967 /* Too stressed to deal. */
1968 NF_CT_STAT_INC_ATOMIC(state->net, drop);
1969 ret = NF_DROP;
1970 goto out;
1971 }
1972
1973 ct = nf_ct_get(skb, &ctinfo);
1974 if (!ct) {
1975 /* Not valid part of a connection */
1976 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1977 ret = NF_ACCEPT;
1978 goto out;
1979 }
1980
1981 ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
1982 if (ret <= 0) {
1983 /* Invalid: inverse of the return code tells
1984 * the netfilter core what to do */
1985 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1986 nf_ct_put(ct);
1987 skb->_nfct = 0;
1988 /* Special case: TCP tracker reports an attempt to reopen a
1989 * closed/aborted connection. We have to go back and create a
1990 * fresh conntrack.
1991 */
1992 if (ret == -NF_REPEAT)
1993 goto repeat;
1994
1995 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1996 if (ret == -NF_DROP)
1997 NF_CT_STAT_INC_ATOMIC(state->net, drop);
1998
1999 ret = -ret;
2000 goto out;
2001 }
2002
2003 if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
2004 !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
2005 nf_conntrack_event_cache(IPCT_REPLY, ct);
2006 out:
2007 if (tmpl)
2008 nf_ct_put(tmpl);
2009
2010 return ret;
2011 }
2012 EXPORT_SYMBOL_GPL(nf_conntrack_in);
2013
2014 /* Alter reply tuple (maybe alter helper). This is for NAT, and is
2015 implicitly racy: see __nf_conntrack_confirm */
2016 void nf_conntrack_alter_reply(struct nf_conn *ct,
2017 const struct nf_conntrack_tuple *newreply)
2018 {
2019 struct nf_conn_help *help = nfct_help(ct);
2020
2021 /* Should be unconfirmed, so not in hash table yet */
2022 WARN_ON(nf_ct_is_confirmed(ct));
2023
2024 pr_debug("Altering reply tuple of %p to ", ct);
2025 nf_ct_dump_tuple(newreply);
2026
2027 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
2028 if (ct->master || (help && !hlist_empty(&help->expectations)))
2029 return;
2030
2031 rcu_read_lock();
2032 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
2033 rcu_read_unlock();
2034 }
2035 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
2036
2037 /* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
2038 void __nf_ct_refresh_acct(struct nf_conn *ct,
2039 enum ip_conntrack_info ctinfo,
2040 const struct sk_buff *skb,
2041 u32 extra_jiffies,
2042 bool do_acct)
2043 {
2044 /* Only update if this is not a fixed timeout */
2045 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
2046 goto acct;
2047
2048 /* If not in hash table, timer will not be active yet */
2049 if (nf_ct_is_confirmed(ct))
2050 extra_jiffies += nfct_time_stamp;
2051
2052 if (READ_ONCE(ct->timeout) != extra_jiffies)
2053 WRITE_ONCE(ct->timeout, extra_jiffies);
2054 acct:
2055 if (do_acct)
2056 nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
2057 }
2058 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
2059
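/* Account for the final packet seen on this conntrack, then delete it. */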
2060 bool nf_ct_kill_acct(struct nf_conn *ct,
2061 enum ip_conntrack_info ctinfo,
2062 const struct sk_buff *skb)
2063 {
2064 nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
2065
2066 return nf_ct_delete(ct, 0, 0);
2067 }
2068 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
2069
2070 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
2071
2072 #include <linux/netfilter/nfnetlink.h>
2073 #include <linux/netfilter/nfnetlink_conntrack.h>
2074 #include <linux/mutex.h>
2075
2076 /* Generic function for tcp/udp/sctp/dccp and the like. */
2077 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
2078 const struct nf_conntrack_tuple *tuple)
2079 {
2080 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
2081 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
2082 goto nla_put_failure;
2083 return 0;
2084
2085 nla_put_failure:
2086 return -1;
2087 }
2088 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
2089
2090 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
2091 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
2092 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
2093 };
2094 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
2095
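/* Fill the source/destination port of @t from the netlink attributes, but
 * only for the ports requested via the CTA_FILTER flags; a requested port
 * that is missing from the attributes yields -EINVAL.
 */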
2096 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
2097 struct nf_conntrack_tuple *t,
2098 u_int32_t flags)
2099 {
2100 if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
2101 if (!tb[CTA_PROTO_SRC_PORT])
2102 return -EINVAL;
2103
2104 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
2105 }
2106
2107 if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
2108 if (!tb[CTA_PROTO_DST_PORT])
2109 return -EINVAL;
2110
2111 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
2112 }
2113
2114 return 0;
2115 }
2116 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
2117
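/* Netlink attribute space needed for a port-based tuple; computed once
 * from the policy and then cached.
 */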
2118 unsigned int nf_ct_port_nlattr_tuple_size(void)
2119 {
2120 static unsigned int size __read_mostly;
2121
2122 if (!size)
2123 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
2124
2125 return size;
2126 }
2127 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
2128 #endif
2129
2130 /* Used by ipt_REJECT and ip6t_REJECT. */
2131 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
2132 {
2133 struct nf_conn *ct;
2134 enum ip_conntrack_info ctinfo;
2135
2136 /* This ICMP is in reverse direction to the packet which caused it */
2137 ct = nf_ct_get(skb, &ctinfo);
2138 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
2139 ctinfo = IP_CT_RELATED_REPLY;
2140 else
2141 ctinfo = IP_CT_RELATED;
2142
2143 /* Attach to new skbuff, and increment count */
2144 nf_ct_set(nskb, ct, ctinfo);
2145 nf_conntrack_get(skb_nfct(nskb));
2146 }
2147
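/* Re-resolve the conntrack for a packet reinjected from userspace: rebuild
 * the tuple (undoing any NAT already applied), look up a clashing confirmed
 * entry and, if one exists, attach it to the skb and redo the NAT mangling.
 */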
2148 static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
2149 struct nf_conn *ct,
2150 enum ip_conntrack_info ctinfo)
2151 {
2152 struct nf_conntrack_tuple_hash *h;
2153 struct nf_conntrack_tuple tuple;
2154 struct nf_nat_hook *nat_hook;
2155 unsigned int status;
2156 int dataoff;
2157 u16 l3num;
2158 u8 l4num;
2159
2160 l3num = nf_ct_l3num(ct);
2161
2162 dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
2163 if (dataoff <= 0)
2164 return -1;
2165
2166 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
2167 l4num, net, &tuple))
2168 return -1;
2169
2170 if (ct->status & IPS_SRC_NAT) {
2171 memcpy(tuple.src.u3.all,
2172 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
2173 sizeof(tuple.src.u3.all));
2174 tuple.src.u.all =
2175 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
2176 }
2177
2178 if (ct->status & IPS_DST_NAT) {
2179 memcpy(tuple.dst.u3.all,
2180 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
2181 sizeof(tuple.dst.u3.all));
2182 tuple.dst.u.all =
2183 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
2184 }
2185
2186 h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
2187 if (!h)
2188 return 0;
2189
2190 /* Store status bits of the clashing conntrack so that NAT mangling can
2191 * be re-done according to what has already been done to this packet.
2192 */
2193 status = ct->status;
2194
2195 nf_ct_put(ct);
2196 ct = nf_ct_tuplehash_to_ctrack(h);
2197 nf_ct_set(skb, ct, ctinfo);
2198
2199 nat_hook = rcu_dereference(nf_nat_hook);
2200 if (!nat_hook)
2201 return 0;
2202
2203 if (status & IPS_SRC_NAT &&
2204 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
2205 IP_CT_DIR_ORIGINAL) == NF_DROP)
2206 return -1;
2207
2208 if (status & IPS_DST_NAT &&
2209 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
2210 IP_CT_DIR_ORIGINAL) == NF_DROP)
2211 return -1;
2212
2213 return 0;
2214 }
2215
2216 /* This packet is coming from userspace via nf_queue; complete the packet
2217 * processing after the helper invocation in nf_confirm().
2218 */
2219 static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
2220 enum ip_conntrack_info ctinfo)
2221 {
2222 const struct nf_conntrack_helper *helper;
2223 const struct nf_conn_help *help;
2224 int protoff;
2225
2226 help = nfct_help(ct);
2227 if (!help)
2228 return 0;
2229
2230 helper = rcu_dereference(help->helper);
2231 if (!helper)
2232 return 0;
2233
2234 if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
2235 return 0;
2236
2237 switch (nf_ct_l3num(ct)) {
2238 case NFPROTO_IPV4:
2239 protoff = skb_network_offset(skb) + ip_hdrlen(skb);
2240 break;
2241 #if IS_ENABLED(CONFIG_IPV6)
2242 case NFPROTO_IPV6: {
2243 __be16 frag_off;
2244 u8 pnum;
2245
2246 pnum = ipv6_hdr(skb)->nexthdr;
2247 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
2248 &frag_off);
2249 if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
2250 return 0;
2251 break;
2252 }
2253 #endif
2254 default:
2255 return 0;
2256 }
2257
2258 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
2259 !nf_is_loopback_packet(skb)) {
2260 if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
2261 NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
2262 return -1;
2263 }
2264 }
2265
2266 /* We've seen it coming out the other side: confirm it */
2267 return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
2268 }
2269
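/* ->update hook of nf_ct_hook, called for packets reinjected via nf_queue:
 * resolve a possible clash for unconfirmed entries and complete userspace
 * helper processing and confirmation.
 */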
2270 static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
2271 {
2272 enum ip_conntrack_info ctinfo;
2273 struct nf_conn *ct;
2274 int err;
2275
2276 ct = nf_ct_get(skb, &ctinfo);
2277 if (!ct)
2278 return 0;
2279
2280 if (!nf_ct_is_confirmed(ct)) {
2281 err = __nf_conntrack_update(net, skb, ct, ctinfo);
2282 if (err < 0)
2283 return err;
2284
2285 ct = nf_ct_get(skb, &ctinfo);
2286 }
2287
2288 return nf_confirm_cthelper(skb, ct, ctinfo);
2289 }
2290
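/* Extract a conntrack tuple for @skb: use the attached conntrack if there
 * is one, otherwise parse the packet (IPv4 only) and look it up in the
 * default zone.
 */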
2291 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
2292 const struct sk_buff *skb)
2293 {
2294 const struct nf_conntrack_tuple *src_tuple;
2295 const struct nf_conntrack_tuple_hash *hash;
2296 struct nf_conntrack_tuple srctuple;
2297 enum ip_conntrack_info ctinfo;
2298 struct nf_conn *ct;
2299
2300 ct = nf_ct_get(skb, &ctinfo);
2301 if (ct) {
2302 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
2303 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
2304 return true;
2305 }
2306
2307 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
2308 NFPROTO_IPV4, dev_net(skb->dev),
2309 &srctuple))
2310 return false;
2311
2312 hash = nf_conntrack_find_get(dev_net(skb->dev),
2313 &nf_ct_zone_dflt,
2314 &srctuple);
2315 if (!hash)
2316 return false;
2317
2318 ct = nf_ct_tuplehash_to_ctrack(hash);
2319 src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
2320 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
2321 nf_ct_put(ct);
2322
2323 return true;
2324 }
2325
2326 /* Bring out ya dead! */
2327 static struct nf_conn *
2328 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
2329 void *data, unsigned int *bucket)
2330 {
2331 struct nf_conntrack_tuple_hash *h;
2332 struct nf_conn *ct;
2333 struct hlist_nulls_node *n;
2334 spinlock_t *lockp;
2335
2336 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
2337 struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
2338
2339 if (hlist_nulls_empty(hslot))
2340 continue;
2341
2342 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
2343 local_bh_disable();
2344 nf_conntrack_lock(lockp);
2345 hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
2346 if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
2347 continue;
2348 /* All nf_conn objects are added to the hash table twice, once
2349 * for the original direction tuple, once for the reply tuple.
2350 *
2351 * Exception: In the IPS_NAT_CLASH case, only the reply
2352 * tuple is added (the original tuple already existed for
2353 * a different object).
2354 *
2355 * We only need to call the iterator once for each
2356 * conntrack, so we just use the 'reply' direction
2357 * tuple while iterating.
2358 */
2359 ct = nf_ct_tuplehash_to_ctrack(h);
2360 if (iter(ct, data))
2361 goto found;
2362 }
2363 spin_unlock(lockp);
2364 local_bh_enable();
2365 cond_resched();
2366 }
2367
2368 return NULL;
2369 found:
2370 refcount_inc(&ct->ct_general.use);
2371 spin_unlock(lockp);
2372 local_bh_enable();
2373 return ct;
2374 }
2375
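/* Walk the whole conntrack table and delete every entry for which @iter
 * returns true. Serialized against hash resizes via nf_conntrack_mutex.
 */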
2376 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
2377 void *data, u32 portid, int report)
2378 {
2379 unsigned int bucket = 0;
2380 struct nf_conn *ct;
2381
2382 might_sleep();
2383
2384 mutex_lock(&nf_conntrack_mutex);
2385 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
2386 /* Time to push up daisies... */
2387
2388 nf_ct_delete(ct, portid, report);
2389 nf_ct_put(ct);
2390 cond_resched();
2391 }
2392 mutex_unlock(&nf_conntrack_mutex);
2393 }
2394
2395 struct iter_data {
2396 int (*iter)(struct nf_conn *i, void *data);
2397 void *data;
2398 struct net *net;
2399 };
2400
2401 static int iter_net_only(struct nf_conn *i, void *data)
2402 {
2403 struct iter_data *d = data;
2404
2405 if (!net_eq(d->net, nf_ct_net(i)))
2406 return 0;
2407
2408 return d->iter(i, d->data);
2409 }
2410
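/* Mark every entry on the per-cpu unconfirmed lists as dying so that it
 * will not be inserted into the main table.
 */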
2411 static void
2412 __nf_ct_unconfirmed_destroy(struct net *net)
2413 {
2414 int cpu;
2415
2416 for_each_possible_cpu(cpu) {
2417 struct nf_conntrack_tuple_hash *h;
2418 struct hlist_nulls_node *n;
2419 struct ct_pcpu *pcpu;
2420
2421 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2422
2423 spin_lock_bh(&pcpu->lock);
2424 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
2425 struct nf_conn *ct;
2426
2427 ct = nf_ct_tuplehash_to_ctrack(h);
2428
2429 /* we cannot call iter() on the unconfirmed list; the
2430 * owning cpu can reallocate ct->ext at any time.
2431 */
2432 set_bit(IPS_DYING_BIT, &ct->status);
2433 }
2434 spin_unlock_bh(&pcpu->lock);
2435 cond_resched();
2436 }
2437 }
2438
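/* Flag all unconfirmed entries of @net as dying and drop any packets that
 * are still queued via nf_queue.
 */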
2439 void nf_ct_unconfirmed_destroy(struct net *net)
2440 {
2441 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
2442
2443 might_sleep();
2444
2445 if (atomic_read(&cnet->count) > 0) {
2446 __nf_ct_unconfirmed_destroy(net);
2447 nf_queue_nf_hook_drop(net);
2448 synchronize_net();
2449 }
2450 }
2451 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
2452
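/* Like nf_ct_iterate_cleanup(), but only visits conntracks that belong
 * to @net.
 */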
2453 void nf_ct_iterate_cleanup_net(struct net *net,
2454 int (*iter)(struct nf_conn *i, void *data),
2455 void *data, u32 portid, int report)
2456 {
2457 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
2458 struct iter_data d;
2459
2460 might_sleep();
2461
2462 if (atomic_read(&cnet->count) == 0)
2463 return;
2464
2465 d.iter = iter;
2466 d.data = data;
2467 d.net = net;
2468
2469 nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
2470 }
2471 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
2472
2473 /**
2474 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2475 * @iter: callback to invoke for each conntrack
2476 * @data: data to pass to @iter
2477 *
2478 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2479 * unconfirmed list as dying (so they will not be inserted into
2480 * main table).
2481 *
2482 * Can only be called in module exit path.
2483 */
2484 void
2485 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2486 {
2487 struct net *net;
2488
2489 down_read(&net_rwsem);
2490 for_each_net(net) {
2491 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
2492
2493 if (atomic_read(&cnet->count) == 0)
2494 continue;
2495 __nf_ct_unconfirmed_destroy(net);
2496 nf_queue_nf_hook_drop(net);
2497 }
2498 up_read(&net_rwsem);
2499
2500 /* Need to wait for the netns cleanup worker to finish, if it's
2501 * running -- it might have deleted a net namespace from
2502 * the global list, so our __nf_ct_unconfirmed_destroy() might
2503 * not have affected all namespaces.
2504 */
2505 net_ns_barrier();
2506
2507 /* a conntrack could have been unlinked from the unconfirmed list
2508 * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2509 * This makes sure it is inserted into the conntrack table.
2510 */
2511 synchronize_net();
2512
2513 nf_ct_iterate_cleanup(iter, data, 0, 0);
2514 }
2515 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
2516
2517 static int kill_all(struct nf_conn *i, void *data)
2518 {
2519 return net_eq(nf_ct_net(i), data);
2520 }
2521
2522 void nf_conntrack_cleanup_start(void)
2523 {
2524 conntrack_gc_work.exiting = true;
2525 RCU_INIT_POINTER(ip_ct_attach, NULL);
2526 }
2527
2528 void nf_conntrack_cleanup_end(void)
2529 {
2530 RCU_INIT_POINTER(nf_ct_hook, NULL);
2531 cancel_delayed_work_sync(&conntrack_gc_work.dwork);
2532 kvfree(nf_conntrack_hash);
2533
2534 nf_conntrack_proto_fini();
2535 nf_conntrack_seqadj_fini();
2536 nf_conntrack_labels_fini();
2537 nf_conntrack_helper_fini();
2538 nf_conntrack_timeout_fini();
2539 nf_conntrack_ecache_fini();
2540 nf_conntrack_tstamp_fini();
2541 nf_conntrack_acct_fini();
2542 nf_conntrack_expect_fini();
2543
2544 kmem_cache_destroy(nf_conntrack_cachep);
2545 }
2546
2547 /*
2548 * Mishearing the voices in his head, our hero wonders how he's
2549 * supposed to kill the mall.
2550 */
2551 void nf_conntrack_cleanup_net(struct net *net)
2552 {
2553 LIST_HEAD(single);
2554
2555 list_add(&net->exit_list, &single);
2556 nf_conntrack_cleanup_net_list(&single);
2557 }
2558
2559 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2560 {
2561 int busy;
2562 struct net *net;
2563
2564 /*
2565 * This makes sure all current packets have passed through
2566 * the netfilter framework. Roll on, two-stage module
2567 * delete...
2568 */
2569 synchronize_net();
2570 i_see_dead_people:
2571 busy = 0;
2572 list_for_each_entry(net, net_exit_list, exit_list) {
2573 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
2574
2575 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
2576 if (atomic_read(&cnet->count) != 0)
2577 busy = 1;
2578 }
2579 if (busy) {
2580 schedule();
2581 goto i_see_dead_people;
2582 }
2583
2584 list_for_each_entry(net, net_exit_list, exit_list) {
2585 nf_conntrack_ecache_pernet_fini(net);
2586 nf_conntrack_expect_pernet_fini(net);
2587 free_percpu(net->ct.stat);
2588 free_percpu(net->ct.pcpu_lists);
2589 }
2590 }
2591
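/* Allocate a hash table with *sizep buckets, rounded up to a whole number
 * of pages; initialize the nulls markers when @nulls is set.
 */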
2592 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
2593 {
2594 struct hlist_nulls_head *hash;
2595 unsigned int nr_slots, i;
2596
2597 if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2598 return NULL;
2599
2600 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2601 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
2602
2603 hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
2604
2605 if (hash && nulls)
2606 for (i = 0; i < nr_slots; i++)
2607 INIT_HLIST_NULLS_HEAD(&hash[i], i);
2608
2609 return hash;
2610 }
2611 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
2612
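/* Replace the global conntrack hash table with one of @hashsize buckets,
 * rehashing all existing entries while holding the global locks.
 */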
2613 int nf_conntrack_hash_resize(unsigned int hashsize)
2614 {
2615 int i, bucket;
2616 unsigned int old_size;
2617 struct hlist_nulls_head *hash, *old_hash;
2618 struct nf_conntrack_tuple_hash *h;
2619 struct nf_conn *ct;
2620
2621 if (!hashsize)
2622 return -EINVAL;
2623
2624 hash = nf_ct_alloc_hashtable(&hashsize, 1);
2625 if (!hash)
2626 return -ENOMEM;
2627
2628 mutex_lock(&nf_conntrack_mutex);
2629 old_size = nf_conntrack_htable_size;
2630 if (old_size == hashsize) {
2631 mutex_unlock(&nf_conntrack_mutex);
2632 kvfree(hash);
2633 return 0;
2634 }
2635
2636 local_bh_disable();
2637 nf_conntrack_all_lock();
2638 write_seqcount_begin(&nf_conntrack_generation);
2639
2640 /* Lookups in the old hash might happen in parallel, which means we
2641 * might get false negatives during connection lookup. New connections
2642 * created because of a false negative won't make it into the hash
2643 * though, since that requires taking the locks.
2644 */
2645
2646 for (i = 0; i < nf_conntrack_htable_size; i++) {
2647 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2648 unsigned int zone_id;
2649
2650 h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2651 struct nf_conntrack_tuple_hash, hnnode);
2652 ct = nf_ct_tuplehash_to_ctrack(h);
2653 hlist_nulls_del_rcu(&h->hnnode);
2654
2655 zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
2656 bucket = __hash_conntrack(nf_ct_net(ct),
2657 &h->tuple, zone_id, hashsize);
2658 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
2659 }
2660 }
2661 old_size = nf_conntrack_htable_size;
2662 old_hash = nf_conntrack_hash;
2663
2664 nf_conntrack_hash = hash;
2665 nf_conntrack_htable_size = hashsize;
2666
2667 write_seqcount_end(&nf_conntrack_generation);
2668 nf_conntrack_all_unlock();
2669 local_bh_enable();
2670
2671 mutex_unlock(&nf_conntrack_mutex);
2672
2673 synchronize_net();
2674 kvfree(old_hash);
2675 return 0;
2676 }
2677
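/* "hashsize" module parameter handler; resizing is only allowed from the
 * initial network namespace.
 */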
2678 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2679 {
2680 unsigned int hashsize;
2681 int rc;
2682
2683 if (current->nsproxy->net_ns != &init_net)
2684 return -EOPNOTSUPP;
2685
2686 /* On boot, we can set this without any fancy locking. */
2687 if (!nf_conntrack_hash)
2688 return param_set_uint(val, kp);
2689
2690 rc = kstrtouint(val, 0, &hashsize);
2691 if (rc)
2692 return rc;
2693
2694 return nf_conntrack_hash_resize(hashsize);
2695 }
2696
2697 static __always_inline unsigned int total_extension_size(void)
2698 {
2699 /* remember to add new extensions below */
2700 BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2701
2702 return sizeof(struct nf_ct_ext) +
2703 sizeof(struct nf_conn_help)
2704 #if IS_ENABLED(CONFIG_NF_NAT)
2705 + sizeof(struct nf_conn_nat)
2706 #endif
2707 + sizeof(struct nf_conn_seqadj)
2708 + sizeof(struct nf_conn_acct)
2709 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2710 + sizeof(struct nf_conntrack_ecache)
2711 #endif
2712 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2713 + sizeof(struct nf_conn_tstamp)
2714 #endif
2715 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2716 + sizeof(struct nf_conn_timeout)
2717 #endif
2718 #ifdef CONFIG_NF_CONNTRACK_LABELS
2719 + sizeof(struct nf_conn_labels)
2720 #endif
2721 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2722 + sizeof(struct nf_conn_synproxy)
2723 #endif
2724 ;
2725 }
2726
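/* One-time global initialization: size the hash table from available memory
 * (unless set on the command line), create the nf_conn slab cache, bring up
 * the extension subsystems and schedule the gc worker.
 */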
2727 int nf_conntrack_init_start(void)
2728 {
2729 unsigned long nr_pages = totalram_pages();
2730 int max_factor = 8;
2731 int ret = -ENOMEM;
2732 int i;
2733
2734 /* struct nf_ct_ext uses u8 to store offsets/size */
2735 BUILD_BUG_ON(total_extension_size() > 255u);
2736
2737 seqcount_spinlock_init(&nf_conntrack_generation,
2738 &nf_conntrack_locks_all_lock);
2739
2740 for (i = 0; i < CONNTRACK_LOCKS; i++)
2741 spin_lock_init(&nf_conntrack_locks[i]);
2742
2743 if (!nf_conntrack_htable_size) {
2744 nf_conntrack_htable_size
2745 = (((nr_pages << PAGE_SHIFT) / 16384)
2746 / sizeof(struct hlist_head));
2747 if (BITS_PER_LONG >= 64 &&
2748 nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2749 nf_conntrack_htable_size = 262144;
2750 else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2751 nf_conntrack_htable_size = 65536;
2752
2753 if (nf_conntrack_htable_size < 1024)
2754 nf_conntrack_htable_size = 1024;
2755 /* Use a max. factor of one by default to keep the average
2756 * hash chain length at 2 entries. Each entry has to be added
2757 * twice (once for original direction, once for reply).
2758 * When a table size is given we use the old value of 8 to
2759 * avoid implicit reduction of the max entries setting.
2760 */
2761 max_factor = 1;
2762 }
2763
2764 nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2765 if (!nf_conntrack_hash)
2766 return -ENOMEM;
2767
2768 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2769
2770 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2771 sizeof(struct nf_conn),
2772 NFCT_INFOMASK + 1,
2773 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2774 if (!nf_conntrack_cachep)
2775 goto err_cachep;
2776
2777 ret = nf_conntrack_expect_init();
2778 if (ret < 0)
2779 goto err_expect;
2780
2781 ret = nf_conntrack_acct_init();
2782 if (ret < 0)
2783 goto err_acct;
2784
2785 ret = nf_conntrack_tstamp_init();
2786 if (ret < 0)
2787 goto err_tstamp;
2788
2789 ret = nf_conntrack_ecache_init();
2790 if (ret < 0)
2791 goto err_ecache;
2792
2793 ret = nf_conntrack_timeout_init();
2794 if (ret < 0)
2795 goto err_timeout;
2796
2797 ret = nf_conntrack_helper_init();
2798 if (ret < 0)
2799 goto err_helper;
2800
2801 ret = nf_conntrack_labels_init();
2802 if (ret < 0)
2803 goto err_labels;
2804
2805 ret = nf_conntrack_seqadj_init();
2806 if (ret < 0)
2807 goto err_seqadj;
2808
2809 ret = nf_conntrack_proto_init();
2810 if (ret < 0)
2811 goto err_proto;
2812
2813 conntrack_gc_work_init(&conntrack_gc_work);
2814 queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2815
2816 return 0;
2817
2818 err_proto:
2819 nf_conntrack_seqadj_fini();
2820 err_seqadj:
2821 nf_conntrack_labels_fini();
2822 err_labels:
2823 nf_conntrack_helper_fini();
2824 err_helper:
2825 nf_conntrack_timeout_fini();
2826 err_timeout:
2827 nf_conntrack_ecache_fini();
2828 err_ecache:
2829 nf_conntrack_tstamp_fini();
2830 err_tstamp:
2831 nf_conntrack_acct_fini();
2832 err_acct:
2833 nf_conntrack_expect_fini();
2834 err_expect:
2835 kmem_cache_destroy(nf_conntrack_cachep);
2836 err_cachep:
2837 kvfree(nf_conntrack_hash);
2838 return ret;
2839 }
2840
2841 static struct nf_ct_hook nf_conntrack_hook = {
2842 .update = nf_conntrack_update,
2843 .destroy = nf_ct_destroy,
2844 .get_tuple_skb = nf_conntrack_get_tuple_skb,
2845 };
2846
2847 void nf_conntrack_init_end(void)
2848 {
2849 /* For use by REJECT target */
2850 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2851 RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
2852 }
2853
2854 /*
2855 * We need to use special "null" values, not used in hash table
2856 */
2857 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
2858 #define DYING_NULLS_VAL ((1<<30)+1)
2859
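/* Per-netns initialization: per-cpu unconfirmed/dying lists, statistics and
 * the per-netns parts of the extension subsystems.
 */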
2860 int nf_conntrack_init_net(struct net *net)
2861 {
2862 struct nf_conntrack_net *cnet = nf_ct_pernet(net);
2863 int ret = -ENOMEM;
2864 int cpu;
2865
2866 BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2867 BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
2868 atomic_set(&cnet->count, 0);
2869
2870 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2871 if (!net->ct.pcpu_lists)
2872 goto err_stat;
2873
2874 for_each_possible_cpu(cpu) {
2875 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2876
2877 spin_lock_init(&pcpu->lock);
2878 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2879 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2880 }
2881
2882 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2883 if (!net->ct.stat)
2884 goto err_pcpu_lists;
2885
2886 ret = nf_conntrack_expect_pernet_init(net);
2887 if (ret < 0)
2888 goto err_expect;
2889
2890 nf_conntrack_acct_pernet_init(net);
2891 nf_conntrack_tstamp_pernet_init(net);
2892 nf_conntrack_ecache_pernet_init(net);
2893 nf_conntrack_helper_pernet_init(net);
2894 nf_conntrack_proto_pernet_init(net);
2895
2896 return 0;
2897
2898 err_expect:
2899 free_percpu(net->ct.stat);
2900 err_pcpu_lists:
2901 free_percpu(net->ct.pcpu_lists);
2902 err_stat:
2903 return ret;
2904 }
2905