/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb *)((skb)->cb))

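/* Record the fragment's ECN codepoint as a single bit so that fq->ecn can
 * accumulate the codepoints seen across all fragments; ip_frag_ecn_table
 * later maps that bitmask back to a final codepoint, or rejects the
 * datagram if the fragments are inconsistent.
 */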
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
        return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                             const struct in6_addr *daddr, u32 rnd)
{
        u32 c;

        c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
                         (__force u32)id, rnd);

        return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return fq->id == arg->id &&
               fq->user == arg->user &&
               ipv6_addr_equal(&fq->saddr, arg->src) &&
               ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        fq->user = arg->user;
        fq->saddr = *arg->src;
        fq->daddr = *arg->dst;
        fq->ecn = arg->ecn;
}
EXPORT_SYMBOL(ip6_frag_init);

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
                           struct inet_frags *frags)
{
        struct net_device *dev = NULL;

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        inet_frag_kill(&fq->q, frags);

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
                goto out_rcu_unlock;

        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out_rcu_unlock;

        /* But use the device on which the LAST ARRIVED segment was received
         * as the source. Do not use a stored device pointer directly; the
         * device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
        rcu_read_unlock();
out:
        spin_unlock(&fq->q.lock);
        inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);

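/* Timer callback: runs when a reassembly queue's timer fires before the
 * datagram is complete.
 */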
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
        net = container_of(fq->q.net, struct net, ipv6.frags);

        ip6_expire_frag_queue(net, fq, &ip6_frags);
}

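/* Look up the reassembly queue for this (id, src, dst) tuple, creating one
 * if it does not exist yet.  Returns NULL on allocation failure or hash
 * overflow.
 */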
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
        const struct in6_addr *dst, u8 ecn)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;
        arg.ecn = ecn;

        read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
                inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
        }
        return container_of(q, struct frag_queue, q);
}

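/* Queue one fragment on fq.  Returns the result of ip6_frag_reasm() once
 * the datagram is complete, otherwise -1; the skb is consumed either way.
 */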
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                          struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;
        struct net *net = dev_net(skb_dst(skb)->dev);
        u8 ecn;

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

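        /* offset is the 8-byte-aligned fragment offset from the Fragment
         * header; end is the offset one past this fragment's payload, i.e.
         * payload_len minus everything between the end of the IPv6 header
         * and the end of the Fragment header.
         */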
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        ecn = ip6_frag_ecn(ipv6_hdr(skb));

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = fq->q.fragments_tail;
        if (!prev || FRAG6_CB(prev)->offset < offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* RFC5722, Section 4, amended by Errata ID 3089:
         * When reassembling an IPv6 datagram, if one or more of its
         * constituent fragments is determined to be an overlapping fragment,
         * the entire datagram (and any constituent fragments) MUST be
         * silently discarded.
         */

        /* Check for overlap with the preceding fragment. */
        if (prev &&
            (FRAG6_CB(prev)->offset + prev->len) > offset)
                goto discard_fq;

        /* Look for overlap with the succeeding segment. */
        if (next && FRAG6_CB(next)->offset < end)
                goto discard_fq;

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        fq->ecn |= ecn;
        add_frag_mem_limit(&fq->q, skb->truesize);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                res = ip6_frag_reasm(fq, prev, dev);
                skb->_skb_refdst = orefdst;
                return res;
        }

        skb_dst_drop(skb);
        inet_frag_lru_move(&fq->q);
        return -1;

discard_fq:
        inet_frag_kill(&fq->q, &ip6_frags);
err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with the fq locked; the caller must have checked that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 *	first and the last fragments have arrived, and all the bits are here.
 *
 *	Returns 1 on success, with IP6CB(head)->nhoff set to the offset of the
 *	current nexthdr field in the reassembled frame, or -1 on failure for
 *	any reason.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
        int payload_len;
        unsigned int nhoff;
        int sum_truesize;
        u8 ecn;

        inet_frag_kill(&fq->q, &ip6_frags);

        ecn = ip_frag_ecn_table[fq->ecn];
        if (unlikely(ecn == 0xff))
                goto out_fail;

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                if (!fp->next)
                        fq->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                consume_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG6_CB(head)->offset != 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(&fq->q, clone->truesize);
        }

        /* We have to remove the fragment header from the datagram and to
         * relocate the header in order to calculate the ICV correctly.
         */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));

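        /* Walk the remaining fragments: fold their checksums into the head,
         * then either coalesce each fragment's data into the head skb or
         * chain it on the head's frag_list.
         */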
        sum_truesize = head->truesize;
        for (fp = head->next; fp;) {
                bool headstolen;
                int delta;
                struct sk_buff *next = fp->next;

                sum_truesize += fp->truesize;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);

                if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
                        kfree_skb_partial(fp, headstolen);
                } else {
                        if (!skb_shinfo(head)->frag_list)
                                skb_shinfo(head)->frag_list = fp;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        head->truesize += fp->truesize;
                }
                fp = next;
        }
        sub_frag_mem_limit(&fq->q, sum_truesize);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;
        return 1;

out_oversize:
        net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

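/* Protocol handler for the IPv6 Fragment header (IPPROTO_FRAGMENT). */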
static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);
        int evicted;

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0)
                goto fail_hdr;

        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr))))
                goto fail_hdr;

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
        if (evicted)
                IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_REASMFAILS, evicted);

        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
                     ip6_frag_ecn(hdr));
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                inet_frag_put(&fq->q, &ip6_frags);
                return ret;
        }

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;

fail_hdr:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
}

static const struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
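/* Reassembly tunables exported under /proc/sys/net/ipv6/: the first table
 * is per network namespace, the second (the hash secret interval) is global.
 */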
static struct ctl_table ip6_frags_ns_ctl_table[] = {
        {
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_time",
                .data           = &init_net.ipv6.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .procname       = "ip6frag_secret_interval",
                .data           = &ip6_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

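/* For namespaces other than init_net the template table is duplicated so
 * that each namespace points at its own thresholds and timeout.
 */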
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
                table[2].data = &net->ipv6.frags.timeout;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
        }

        hdr = register_net_sysctl(net, "net/ipv6", table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv6.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
        ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
                                             ip6_frags_ctl_table);
        return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
        return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

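/* Per-namespace setup: install the default thresholds and timeout, then
 * register the namespace's sysctl table.
 */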
static int __net_init ipv6_frags_init_net(struct net *net)
{
        net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

        inet_frags_init_net(&net->ipv6.frags);

        return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
        ip6_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
        .init = ipv6_frags_init_net,
        .exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        ret = ip6_frags_sysctl_register();
        if (ret)
                goto err_sysctl;

        ret = register_pernet_subsys(&ip6_frags_ops);
        if (ret)
                goto err_pernet;

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        ip6_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip6_frags);
out:
        return ret;

err_pernet:
        ip6_frags_sysctl_unregister();
err_sysctl:
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        goto out;
}

void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        ip6_frags_sysctl_unregister();
        unregister_pernet_subsys(&ip6_frags_ops);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}