// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 * 	YOSHIFUJI Hideaki @USAGI
 * 		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static struct kmem_cache *secpath_cachep __ro_after_init;

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

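/*
 * Per-family input handlers register themselves here, keyed by
 * afinfo->family (e.g. the IPv4 handler in xfrm4_protocol.c).
 * Readers use RCU; registration and unregistration are serialized
 * by xfrm_input_afinfo_lock.
 */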
int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

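/*
 * On success this returns with rcu_read_lock() held; the caller must
 * drop it once done with the afinfo (see xfrm_rcv_cb() below).  On
 * failure the lock has already been released and NULL is returned.
 */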
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

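/*
 * Dispatch the skb to the address-family specific receive callback.
 * @err carries the outcome of input processing: 0 on success, -1 on
 * the drop path of xfrm_input().
 */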
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

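/*
 * Allocate a fresh sec_path with a refcount of one.  When @src is
 * given its contents are copied and a reference is taken on every
 * xfrm_state in the state vector.
 */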
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	sp->olen = 0;

	memset(sp->ovec, 0, sizeof(sp->ovec));

	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	refcount_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

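/*
 * Make sure the skb carries a secpath we are allowed to modify:
 * reuse the existing one if we hold the only reference, otherwise
 * duplicate it.  Returns 0 or -ENOMEM.
 */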
int secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
		sp = secpath_dup(skb->sp);
		if (!sp)
			return -ENOMEM;

		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	return 0;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

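/*
 * Prepare the decapsulated skb for the inner protocol: let the outer
 * mode's address family extract the inner header, resolve the inner
 * mode for wildcard (AF_UNSPEC) selectors, then hand the skb to the
 * inner mode's input2 handler.
 */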
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

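/*
 * Main IPsec receive path.  The encap_type convention, as handled
 * below: > 0 for UDP encapsulation, 0 for plain AH/ESP/IPcomp, -1
 * for resumption after asynchronous crypto, and < -1 for calls from
 * the GRO codepath.
 */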
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode->afinfo->family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	err = secpath_set(skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		skb->sp->xvec[skb->sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset(skb);

	if (decaps) {
		if (skb->sp)
			skb->sp->olen = 0;
		skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
		if (xfrm_gro) {
			if (skb->sp)
				skb->sp->olen = 0;
			skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

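/*
 * Tasklet handler: splice the per-CPU queue and run each skb's saved
 * finish callback from softirq context.
 */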
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
}

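/*
 * Defer @finish to tasklet context, e.g. to avoid deep recursion into
 * the stack from the receive path.  The queue is bounded by
 * netdev_max_backlog; -ENOBUFS is returned when it is full.
 */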
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);

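/*
 * Boot-time setup: a dummy netdev to anchor the GRO cells, the
 * sec_path slab cache, and one reinjection tasklet per possible CPU.
 */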
void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}