/*	6LoWPAN fragment reassembly
 *
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

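/* Per-fragment metadata parsed from the RFC 4944 fragment header.  It is
 * parked in the skb control buffer (see lowpan_cb() below) while the
 * fragment sits on a reassembly queue.
 */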
struct lowpan_frag_info {
	__be16 d_tag;
	u16 d_size;
	u8 d_offset;
};

static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
	return (struct lowpan_frag_info *)skb->cb;
}

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);

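/* Hash on <tag, datagram size, source, destination>, mixed with a
 * per-boot random seed so remote senders cannot force hash collisions.
 */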
static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}

static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

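/* inet_frag hash-table callbacks: compare a queue against a lookup key
 * and initialise a freshly allocated queue from that key.
 */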
static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
		ieee802154_addr_equal(&fq->saddr, arg->src) &&
		ieee802154_addr_equal(&fq->daddr, arg->dst);
}

static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}

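/* Timer callback: the queue's fragments did not all arrive in time,
 * so kill the queue and drop our reference to it.
 */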
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}

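/* Look up the reassembly queue matching <tag, size, src, dst>, creating
 * a new one if none exists yet.  Returns NULL on allocation failure.
 */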
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

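/* Insert one fragment into its queue, keeping the fragment list sorted
 * by offset, and start reassembly once the first and last fragments
 * have arrived and the accumulated payload matches the datagram size.
 * Called with fq->q.lock held; the skb is freed on error.
 */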
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.flags |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/*	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 once the fragments have been merged into a single skb,
 *	or -1 if we run out of memory.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

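/* Parse the RFC 4944 fragment header.  Both FRAG1 and FRAGN carry an
 * 11-bit datagram size (the top three bits live in the dispatch byte)
 * and a 16-bit datagram tag; FRAGN additionally carries the fragment
 * offset in units of eight octets.
 */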
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct lowpan_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

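/* Receive one FRAG1/FRAGN frame.  Returns 1 when this frame completed
 * the datagram, in which case skb now holds the reassembled packet;
 * returns -1 otherwise.
 */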
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	int err;

	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

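/* Per-namespace setup: start from the IPv6 fragment defaults for the
 * memory thresholds and timeout, then expose them via sysctl.
 */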
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&ieee802154_lowpan->frags);

	return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_frags;

	return ret;
err_frags:
	/* undo the pernet registration if inet_frags_init() failed */
	unregister_pernet_subsys(&lowpan_frags_ops);
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}
584