/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima          :       ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

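/* Per-fragment reassembly state, overlaid on skb->cb via FRAG_CB():
 * the generic inet control block plus this fragment's data offset
 * within the original datagram.
 */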
struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
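/* The (user, saddr, daddr, id, protocol) tuple identifies the datagram
 * being reassembled; iif records the arrival interface for the ICMP
 * time-exceeded reply, and rid/peer drive the ipfrag_max_dist check.
 */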
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int             iif;
	unsigned int    rid;
	struct inet_peer *peer;
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

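/* Hash the (id, protocol, saddr, daddr) tuple into the frag hash table,
 * mixing in a periodically rekeyed random seed (ip4_frags.rnd) so the
 * bucket layout is not predictable by remote senders.
 */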
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

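/* Match callback for inet_frag_find(): a queue matches a lookup key only
 * when all four identifying header fields and the defrag user agree.
 */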
static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return (qp->id == arg->iph->id &&
			qp->saddr == arg->iph->saddr &&
			qp->daddr == arg->iph->daddr &&
			qp->protocol == arg->iph->protocol &&
			qp->user == arg->user);
}

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

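/* Constructor callback: initialize a fresh queue from the header of the
 * first fragment seen. The inet_peer entry is looked up only when the
 * ipfrag_max_dist sysctl is enabled.
 */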
static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer(arg->iph->saddr, 1) : NULL;
}

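/* Destructor callback: release the inet_peer reference, if one was taken. */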
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill an ipq entry. It is not destroyed immediately, because the
 * caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, or create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
	return NULL;
}

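/* Each fragment from a peer increments peer->rid; qp->rid remembers the
 * count as of this queue's previous fragment. If more than
 * ipfrag_max_dist fragments from the same source arrived in between,
 * the queue is assumed stale and gets flushed via ip_frag_reinit().
 */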
/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

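/* Flush every fragment from the queue and reset its state so reassembly
 * can start over, keeping the queue itself and re-arming its timer.
 * Returns -ETIMEDOUT when the timer could not be re-armed because
 * expiry (ip_expire) is already underway.
 */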
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue. */
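/* Returns -EINPROGRESS while more fragments are expected, the result of
 * ip_frag_reasm() once the datagram is complete, or a negative error for
 * a corrupt or unusable fragment (in which case the skb is freed).
 */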
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely covered by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}



/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &qp->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &qp->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
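/* On a 0 return the caller's skb has been turned into the fully
 * reassembled datagram (via the skb_morph() in ip_frag_reasm());
 * any non-zero return means the skb has been queued or freed and
 * must not be touched again.
 */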
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

#ifdef CONFIG_SYSCTL
static int zero;

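/* Per-namespace sysctls: the thresholds and timeout live in struct net,
 * so each network namespace can tune them independently.
 */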
static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.ctl_name	= NET_IPV4_IPFRAG_HIGH_THRESH,
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV4_IPFRAG_LOW_THRESH,
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV4_IPFRAG_TIME,
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{ }
};

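/* Global sysctls shared by all namespaces, registered once via
 * register_net_sysctl_rotable() below.
 */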
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV4_IPFRAG_SECRET_INTERVAL,
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

static int ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires. RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

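/* Boot-time init: register the sysctls and per-namespace hooks, then
 * wire the IPv4 callbacks into the shared inet_frag machinery.
 */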
void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);