/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS	128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value:	0xff if frame should be dropped.
 *		0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

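/* Illustrative sketch only (not part of this file): a reassembler is
 * expected to OR each fragment's ECN bits into its queue and consult the
 * table above when rebuilding the datagram, roughly like this (qp and iph
 * are hypothetical caller-side names):
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *
 *	if (unlikely(ecn == 0xff))	(invalid combination: drop the frame)
 *		return -EINVAL;
 *	iph->tos |= ecn;		(otherwise OR into the final tos)
 */
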
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
			  f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

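/* Re-seed the hash secret and move every queue to the bucket given by its
 * new hash value.  Rate-limited by INETFRAGS_MIN_REBUILD_INTERVAL and run
 * under the write side of f->rnd_seqlock, so concurrent lookups retry (see
 * get_frag_bucket_locked() below).
 */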
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this never runs concurrently with
				 * itself, we cannot deadlock on the hb_dest
				 * lock below: if it is already locked, it
				 * will be released soon, since the other
				 * holder cannot be waiting for the hb lock
				 * that we have taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

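/* A queue is an eviction candidate only if it is not already on an evictor
 * list and its namespace is still over the low memory threshold
 * (low_thresh == 0 means the namespace is being dismantled: evict all).
 */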
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	if (!hlist_unhashed(&q->list_evictor))
		return false;

	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

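/* Collect evictable queues from one bucket onto a private "expired" list
 * (stealing each queue's timer reference via del_timer()), then expire them
 * outside the chain lock.  Returns the number of queues handed to
 * frag_expire().
 */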
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);

	return evicted;
}

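/* Deferred eviction.  Each run scans up to INETFRAGS_EVICT_BUCKETS buckets,
 * starting where the previous run left off, and stops early once more than
 * INETFRAGS_EVICT_MAX queues have been evicted.  A pending secret rebuild
 * is carried out at the end if the rate limit allows it.
 */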
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

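/* Illustrative sketch only, with hypothetical names: a protocol is expected
 * to fill in its struct inet_frags callbacks before registering it here,
 * along the lines of the ipv4/ipv6 reassemblers (exact field set may vary
 * by kernel version):
 *
 *	static struct inet_frags my_frags;
 *
 *	my_frags.hashfn		  = my_hashfn;
 *	my_frags.match		  = my_match;
 *	my_frags.constructor	  = my_ctor;
 *	my_frags.destructor	  = my_dtor;
 *	my_frags.frag_expire	  = my_expire;
 *	my_frags.qsize		  = sizeof(struct my_frag_queue);
 *	my_frags.frags_cache_name = "my-frags";
 *	if (inet_frags_init(&my_frags))
 *		panic("failed to allocate frag queue cache\n");
 */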
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

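/* Per-netns teardown: dropping low_thresh to 0 makes every queue an
 * eviction candidate, then all buckets are swept repeatedly until no memory
 * remains accounted to the namespace and no secret rebuild raced with the
 * sweep.
 */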
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

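/* Hash the queue and lock its bucket, retrying if a secret rebuild changed
 * f->rnd (and therefore possibly the bucket) in the meantime.  Returns with
 * the bucket's chain_lock held.
 */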
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

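/* Take the queue out of circulation: stop its timer and unlink it from the
 * hash, dropping the reference held by each.  The caller's own reference is
 * untouched; the queue is freed once the last reference is put.
 */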
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

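/* Final teardown, called once the last reference is gone: free all queued
 * fragments, run the protocol destructor, release the queue itself and give
 * the accounted memory back to the namespace.
 */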
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

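/* Publish a freshly allocated queue in the hash.  On SMP another CPU may
 * have inserted an equivalent queue while we were allocating ours; in that
 * case the newcomer is discarded and the existing queue is returned with an
 * extra reference.  Otherwise the new queue is armed, referenced once for
 * the timer and once for the hash chain, and returned.
 */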
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP races we have to recheck the hash table, because
	 * such an entry could have been created on another cpu before
	 * we acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

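/* Allocate and initialise a new queue, charging f->qsize to the namespace.
 * Allocation is refused (and the eviction worker kicked) while the
 * namespace is over its high threshold.  The new queue starts with one
 * reference, owned by the caller.
 */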
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

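/* Look up (or create) the queue matching @key in bucket @hash.  On success
 * a referenced queue is returned; NULL means allocation failed, and
 * ERR_PTR(-ENOBUFS) means the chain already holds more than
 * INETFRAGS_MAXDEPTH entries, in which case a secret rebuild is requested.
 *
 * Illustrative caller sketch only (hypothetical names, modelled on the
 * ipv4/ipv6 reassemblers):
 *
 *	q = inet_frag_find(&net->my.frags, &my_frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct my_frag_queue, q);
 */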
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);