/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR'ed value of the IPFRAG_ECN_* bits of all fragments, apply
 * the RFC 3168 section 5.3 requirements.
 * Value: 0xff if the frame should be dropped,
 *        otherwise 0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

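/* Illustrative sketch (not part of this file): a reassembler is expected to
 * consult this table the way ipv4/ip_fragment.c does, with q->ecn holding
 * the OR of the IPFRAG_ECN_* bits of every fragment seen so far:
 *
 *	u8 ecn = ip_frag_ecn_table[q->ecn];
 *
 *	if (unlikely(ecn == 0xff))
 *		goto drop;		// invalid ECN combination
 *	iph->tos |= ecn;		// 0 or INET_ECN_CE
 */
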
int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

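/* Illustrative sketch (not part of this file): a protocol wires up its
 * reassembler by filling in a struct inet_frags and calling inet_frags_init(),
 * much as ipv4's ipfrag_init() does for ip4_frags (the field names below are
 * the ones used throughout this file):
 *
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.frag_expire = ip_expire;
 *	ip4_frags.frags_cache_name = ip_frag_cache_name;
 *	ip4_frags.rhash_params = ip4_rhash_params;
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */
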
void inet_frags_fini(struct inet_frags *f)
{
	/* We must wait until all pending inet_frag_destroy_rcu() callbacks
	 * have completed.
	 */
	rcu_barrier();

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

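/* rhashtable walker callback: invoked via rhashtable_free_and_destroy() in
 * inet_frags_exit_net() for every queue still hashed when a netns goes away.
 */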
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;

	/* If we cannot cancel the timer, this frag_queue is already
	 * disappearing and we have nothing to do.
	 * Otherwise, we own a refcount until the end of this function.
	 */
	if (!del_timer(&fq->timer))
		return;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		atomic_dec(&fq->refcnt);
	}
	spin_unlock_bh(&fq->lock);

	inet_frag_put(fq);
}

void inet_frags_exit_net(struct netns_frags *nf)
{
	nf->high_thresh = 0; /* prevent creation of new frags */

	rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
EXPORT_SYMBOL(inet_frags_exit_net);

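/* Mark a queue complete and remove it from the hash table, dropping the
 * timer's reference (if the timer was still pending) and the hash table's
 * reference; any reference the caller holds remains untouched.
 */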
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct netns_frags *nf = fq->net;

		fq->flags |= INET_FRAG_COMPLETE;
		rhashtable_remove_fast(&nf->rhashtable, &fq->node,
				       nf->f->rhash_params);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->net->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	if (fp) {
		do {
			struct sk_buff *xp = fp->next;

			sum_truesize += fp->truesize;
			frag_kfree_skb(nf, f, fp);
			fp = xp;
		} while (fp);
	} else {
		sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	}
	sum = sum_truesize + f->qsize;

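	/* Lookups may still hold a pointer to this queue, found under
	 * rcu_read_lock(); defer the actual free until a grace period
	 * has elapsed.
	 */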
	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

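	/* high_thresh == 0 means the netns is being dismantled (see
	 * inet_frags_exit_net()); refuse to create new queues in that case.
	 */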
	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
		return NULL;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
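	/* One reference for the hash table, one for the timer,
	 * one for the caller (see inet_frag_find()).
	 */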
	atomic_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = nf->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + nf->timeout);

	*prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
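		/* Lost the race: another CPU inserted an identical queue
		 * first, or the insert itself failed; tear this one down.
		 */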
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use atomic_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
	struct inet_frag_queue *fq = NULL, *prev;

	rcu_read_lock();
	prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(nf, key, &prev);
	if (prev && !IS_ERR(prev)) {
		fq = prev;
		if (!atomic_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);
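
/* Illustrative sketch (not part of this file): a typical caller, such as
 * ip_find() in ipv4/ip_fragment.c, looks the queue up and drops its
 * reference once it is done adding the fragment:
 *
 *	q = inet_frag_find(nf, &key);
 *	if (!q)
 *		return NULL;		// memory pressure, or lost a race
 *	...				// add the fragment under q->lock
 *	inet_frag_put(q);		// release the caller's reference
 */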