1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "IPsec: " fmt
3 
4 #include <crypto/aead.h>
5 #include <crypto/authenc.h>
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <net/ip.h>
9 #include <net/xfrm.h>
10 #include <net/esp.h>
11 #include <linux/scatterlist.h>
12 #include <linux/kernel.h>
13 #include <linux/pfkeyv2.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/in6.h>
18 #include <net/icmp.h>
19 #include <net/protocol.h>
20 #include <net/udp.h>
21 #include <net/tcp.h>
22 #include <net/espintcp.h>
23 
24 #include <linux/highmem.h>
25 
26 struct esp_skb_cb {
27 	struct xfrm_skb_cb xfrm;
28 	void *tmp;
29 };
30 
31 struct esp_output_extra {
32 	__be32 seqhi;
33 	u32 esphoff;
34 };
35 
36 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
37 
38 /*
39  * Allocate an AEAD request structure with extra space for SG and IV.
40  *
41  * For alignment considerations the IV is placed at the front, followed
42  * by the request and finally the SG list.
43  *
44  * TODO: Use spare space in skb for this where possible.
45  */
46 static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
47 {
48 	unsigned int len;
49 
50 	len = extralen;
51 
52 	len += crypto_aead_ivsize(aead);
53 
54 	if (len) {
55 		len += crypto_aead_alignmask(aead) &
56 		       ~(crypto_tfm_ctx_alignment() - 1);
57 		len = ALIGN(len, crypto_tfm_ctx_alignment());
58 	}
59 
60 	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
61 	len = ALIGN(len, __alignof__(struct scatterlist));
62 
63 	len += sizeof(struct scatterlist) * nfrags;
64 
65 	return kmalloc(len, GFP_ATOMIC);
66 }
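/*
 * Illustrative sketch of the buffer returned above, as carved up by the
 * esp_tmp_*() helpers that follow (assuming a non-zero IV size):
 *
 *   [ extra/seqhi | align | IV | align | aead_request + tfm reqsize | align | sg[nfrags] ]
 */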
67 
68 static inline void *esp_tmp_extra(void *tmp)
69 {
70 	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
71 }
72 
73 static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
74 {
75 	return crypto_aead_ivsize(aead) ?
76 	       PTR_ALIGN((u8 *)tmp + extralen,
77 			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
78 }
79 
80 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
81 {
82 	struct aead_request *req;
83 
84 	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
85 				crypto_tfm_ctx_alignment());
86 	aead_request_set_tfm(req, aead);
87 	return req;
88 }
89 
90 static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
91 					     struct aead_request *req)
92 {
93 	return (void *)ALIGN((unsigned long)(req + 1) +
94 			     crypto_aead_reqsize(aead),
95 			     __alignof__(struct scatterlist));
96 }
97 
98 static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
99 {
100 	struct crypto_aead *aead = x->data;
101 	int extralen = 0;
102 	u8 *iv;
103 	struct aead_request *req;
104 	struct scatterlist *sg;
105 
106 	if (x->props.flags & XFRM_STATE_ESN)
107 		extralen += sizeof(struct esp_output_extra);
108 
109 	iv = esp_tmp_iv(aead, tmp, extralen);
110 	req = esp_tmp_req(aead, iv);
111 
112 	/* Unref skb_frag_pages in the src scatterlist if necessary.
113 	 * Skip the first sg which comes from skb->data.
114 	 */
115 	if (req->src != req->dst)
116 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
117 			skb_page_unref(skb, sg_page(sg), false);
118 }
119 
120 #ifdef CONFIG_INET_ESPINTCP
121 static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
122 {
123 	struct xfrm_encap_tmpl *encap = x->encap;
124 	struct net *net = xs_net(x);
125 	__be16 sport, dport;
126 	struct sock *sk;
127 
128 	spin_lock_bh(&x->lock);
129 	sport = encap->encap_sport;
130 	dport = encap->encap_dport;
131 	spin_unlock_bh(&x->lock);
132 
133 	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
134 				     dport, x->props.saddr.a4, sport, 0);
135 	if (!sk)
136 		return ERR_PTR(-ENOENT);
137 
138 	if (!tcp_is_ulp_esp(sk)) {
139 		sock_put(sk);
140 		return ERR_PTR(-EINVAL);
141 	}
142 
143 	return sk;
144 }
145 
146 static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
147 {
148 	struct sock *sk;
149 	int err;
150 
151 	rcu_read_lock();
152 
153 	sk = esp_find_tcp_sk(x);
154 	err = PTR_ERR_OR_ZERO(sk);
155 	if (err) {
156 		kfree_skb(skb);
157 		goto out;
158 	}
159 
160 	bh_lock_sock(sk);
161 	if (sock_owned_by_user(sk))
162 		err = espintcp_queue_out(sk, skb);
163 	else
164 		err = espintcp_push_skb(sk, skb);
165 	bh_unlock_sock(sk);
166 
167 	sock_put(sk);
168 
169 out:
170 	rcu_read_unlock();
171 	return err;
172 }
173 
174 static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
175 				   struct sk_buff *skb)
176 {
177 	struct dst_entry *dst = skb_dst(skb);
178 	struct xfrm_state *x = dst->xfrm;
179 
180 	return esp_output_tcp_finish(x, skb);
181 }
182 
183 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
184 {
185 	int err;
186 
187 	local_bh_disable();
188 	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
189 	local_bh_enable();
190 
191 	/* EINPROGRESS just happens to do the right thing.  It
192 	 * actually means that the skb has been consumed and
193 	 * isn't coming back.
194 	 */
195 	return err ?: -EINPROGRESS;
196 }
197 #else
198 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
199 {
200 	WARN_ON(1);
201 	return -EOPNOTSUPP;
202 }
203 #endif
204 
205 static void esp_output_done(void *data, int err)
206 {
207 	struct sk_buff *skb = data;
208 	struct xfrm_offload *xo = xfrm_offload(skb);
209 	void *tmp;
210 	struct xfrm_state *x;
211 
212 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
213 		struct sec_path *sp = skb_sec_path(skb);
214 
215 		x = sp->xvec[sp->len - 1];
216 	} else {
217 		x = skb_dst(skb)->xfrm;
218 	}
219 
220 	tmp = ESP_SKB_CB(skb)->tmp;
221 	esp_ssg_unref(x, tmp, skb);
222 	kfree(tmp);
223 
224 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
225 		if (err) {
226 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
227 			kfree_skb(skb);
228 			return;
229 		}
230 
231 		skb_push(skb, skb->data - skb_mac_header(skb));
232 		secpath_reset(skb);
233 		xfrm_dev_resume(skb);
234 	} else {
235 		if (!err &&
236 		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
237 			esp_output_tail_tcp(x, skb);
238 		else
239 			xfrm_output_resume(skb->sk, skb, err);
240 	}
241 }
242 
243 /* Move ESP header back into place. */
244 static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
245 {
246 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
247 	void *tmp = ESP_SKB_CB(skb)->tmp;
248 	__be32 *seqhi = esp_tmp_extra(tmp);
249 
250 	esph->seq_no = esph->spi;
251 	esph->spi = *seqhi;
252 }
253 
254 static void esp_output_restore_header(struct sk_buff *skb)
255 {
256 	void *tmp = ESP_SKB_CB(skb)->tmp;
257 	struct esp_output_extra *extra = esp_tmp_extra(tmp);
258 
259 	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
260 				sizeof(__be32));
261 }
262 
263 static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
264 					       struct xfrm_state *x,
265 					       struct ip_esp_hdr *esph,
266 					       struct esp_output_extra *extra)
267 {
268 	/* For ESN we move the header forward by 4 bytes to
269 	 * accommodate the high bits.  We will move it back after
270 	 * encryption.
271 	 */
272 	if ((x->props.flags & XFRM_STATE_ESN)) {
273 		__u32 seqhi;
274 		struct xfrm_offload *xo = xfrm_offload(skb);
275 
276 		if (xo)
277 			seqhi = xo->seq.hi;
278 		else
279 			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
280 
281 		extra->esphoff = (unsigned char *)esph -
282 				 skb_transport_header(skb);
283 		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
284 		extra->seqhi = esph->spi;
285 		esph->seq_no = htonl(seqhi);
286 	}
287 
288 	esph->spi = x->id.spi;
289 
290 	return esph;
291 }
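/*
 * Sketch of the ESN shuffle done above: the header start is moved 4 bytes
 * towards the front so that the associated data seen by the AEAD becomes
 *
 *   [ spi | seq_no high bits | seq_no low bits ]
 *
 * The four bytes that were overwritten are saved in extra->seqhi and put
 * back by esp_output_restore_header() once encryption has finished.
 */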
292 
293 static void esp_output_done_esn(void *data, int err)
294 {
295 	struct sk_buff *skb = data;
296 
297 	esp_output_restore_header(skb);
298 	esp_output_done(data, err);
299 }
300 
301 static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
302 					       int encap_type,
303 					       struct esp_info *esp,
304 					       __be16 sport,
305 					       __be16 dport)
306 {
307 	struct udphdr *uh;
308 	__be32 *udpdata32;
309 	unsigned int len;
310 
311 	len = skb->len + esp->tailen - skb_transport_offset(skb);
312 	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
313 		return ERR_PTR(-EMSGSIZE);
314 
315 	uh = (struct udphdr *)esp->esph;
316 	uh->source = sport;
317 	uh->dest = dport;
318 	uh->len = htons(len);
319 	uh->check = 0;
320 
321 	*skb_mac_header(skb) = IPPROTO_UDP;
322 
323 	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
324 		udpdata32 = (__be32 *)(uh + 1);
325 		udpdata32[0] = udpdata32[1] = 0;
326 		return (struct ip_esp_hdr *)(udpdata32 + 2);
327 	}
328 
329 	return (struct ip_esp_hdr *)(uh + 1);
330 }
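/*
 * Resulting UDP encapsulation framing for the two branches above (sketch;
 * ESPINUDP as in RFC 3948, the NON_IKE variant carries the legacy marker):
 *
 *   UDP_ENCAP_ESPINUDP:          [ UDP header | ESP ... ]
 *   UDP_ENCAP_ESPINUDP_NON_IKE:  [ UDP header | 8 zero bytes | ESP ... ]
 */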
331 
332 #ifdef CONFIG_INET_ESPINTCP
333 static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
334 						    struct sk_buff *skb,
335 						    struct esp_info *esp)
336 {
337 	__be16 *lenp = (void *)esp->esph;
338 	struct ip_esp_hdr *esph;
339 	unsigned int len;
340 	struct sock *sk;
341 
342 	len = skb->len + esp->tailen - skb_transport_offset(skb);
343 	if (len > IP_MAX_MTU)
344 		return ERR_PTR(-EMSGSIZE);
345 
346 	rcu_read_lock();
347 	sk = esp_find_tcp_sk(x);
348 	rcu_read_unlock();
349 
350 	if (IS_ERR(sk))
351 		return ERR_CAST(sk);
352 
353 	sock_put(sk);
354 
355 	*lenp = htons(len);
356 	esph = (struct ip_esp_hdr *)(lenp + 1);
357 
358 	return esph;
359 }
360 #else
361 static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
362 						    struct sk_buff *skb,
363 						    struct esp_info *esp)
364 {
365 	return ERR_PTR(-EOPNOTSUPP);
366 }
367 #endif
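/*
 * For TCP encapsulation (RFC 8229) only a 2-byte length field is placed in
 * front of the ESP header here; the TCP header itself and the stream
 * framing are handled by the espintcp ULP on the socket looked up via
 * esp_find_tcp_sk().
 */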
368 
369 static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
370 			    struct esp_info *esp)
371 {
372 	struct xfrm_encap_tmpl *encap = x->encap;
373 	struct ip_esp_hdr *esph;
374 	__be16 sport, dport;
375 	int encap_type;
376 
377 	spin_lock_bh(&x->lock);
378 	sport = encap->encap_sport;
379 	dport = encap->encap_dport;
380 	encap_type = encap->encap_type;
381 	spin_unlock_bh(&x->lock);
382 
383 	switch (encap_type) {
384 	default:
385 	case UDP_ENCAP_ESPINUDP:
386 	case UDP_ENCAP_ESPINUDP_NON_IKE:
387 		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
388 		break;
389 	case TCP_ENCAP_ESPINTCP:
390 		esph = esp_output_tcp_encap(x, skb, esp);
391 		break;
392 	}
393 
394 	if (IS_ERR(esph))
395 		return PTR_ERR(esph);
396 
397 	esp->esph = esph;
398 
399 	return 0;
400 }
401 
402 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
403 {
404 	u8 *tail;
405 	int nfrags;
406 	int esph_offset;
407 	struct page *page;
408 	struct sk_buff *trailer;
409 	int tailen = esp->tailen;
410 
411 	/* this is non-NULL only with TCP/UDP Encapsulation */
412 	if (x->encap) {
413 		int err = esp_output_encap(x, skb, esp);
414 
415 		if (err < 0)
416 			return err;
417 	}
418 
419 	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
420 	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
421 		goto cow;
422 
423 	if (!skb_cloned(skb)) {
424 		if (tailen <= skb_tailroom(skb)) {
425 			nfrags = 1;
426 			trailer = skb;
427 			tail = skb_tail_pointer(trailer);
428 
429 			goto skip_cow;
430 		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
431 			   && !skb_has_frag_list(skb)) {
432 			int allocsize;
433 			struct sock *sk = skb->sk;
434 			struct page_frag *pfrag = &x->xfrag;
435 
436 			esp->inplace = false;
437 
438 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
439 
440 			spin_lock_bh(&x->lock);
441 
442 			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
443 				spin_unlock_bh(&x->lock);
444 				goto cow;
445 			}
446 
447 			page = pfrag->page;
448 			get_page(page);
449 
450 			tail = page_address(page) + pfrag->offset;
451 
452 			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
453 
454 			nfrags = skb_shinfo(skb)->nr_frags;
455 
456 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
457 					     tailen);
458 			skb_shinfo(skb)->nr_frags = ++nfrags;
459 
460 			pfrag->offset = pfrag->offset + allocsize;
461 
462 			spin_unlock_bh(&x->lock);
463 
464 			nfrags++;
465 
466 			skb_len_add(skb, tailen);
467 			if (sk && sk_fullsock(sk))
468 				refcount_add(tailen, &sk->sk_wmem_alloc);
469 
470 			goto out;
471 		}
472 	}
473 
474 cow:
475 	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
476 
477 	nfrags = skb_cow_data(skb, tailen, &trailer);
478 	if (nfrags < 0)
479 		goto out;
480 	tail = skb_tail_pointer(trailer);
481 	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
482 
483 skip_cow:
484 	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
485 	pskb_put(skb, trailer, tailen);
486 
487 out:
488 	return nfrags;
489 }
490 EXPORT_SYMBOL_GPL(esp_output_head);
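/*
 * Summary of esp_output_head() above (sketch): the packet is grown by
 * esp->tailen bytes either by reusing existing tailroom (skip_cow), by
 * appending the trailer as an extra page fragment taken from x->xfrag
 * (which forces out-of-place encryption, esp->inplace = false), or by
 * falling back to skb_cow_data().  On success it returns the number of
 * scatterlist entries needed to map the source data in esp_output_tail().
 */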
491 
492 int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
493 {
494 	u8 *iv;
495 	int alen;
496 	void *tmp;
497 	int ivlen;
498 	int assoclen;
499 	int extralen;
500 	struct page *page;
501 	struct ip_esp_hdr *esph;
502 	struct crypto_aead *aead;
503 	struct aead_request *req;
504 	struct scatterlist *sg, *dsg;
505 	struct esp_output_extra *extra;
506 	int err = -ENOMEM;
507 
508 	assoclen = sizeof(struct ip_esp_hdr);
509 	extralen = 0;
510 
511 	if (x->props.flags & XFRM_STATE_ESN) {
512 		extralen += sizeof(*extra);
513 		assoclen += sizeof(__be32);
514 	}
515 
516 	aead = x->data;
517 	alen = crypto_aead_authsize(aead);
518 	ivlen = crypto_aead_ivsize(aead);
519 
520 	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
521 	if (!tmp)
522 		goto error;
523 
524 	extra = esp_tmp_extra(tmp);
525 	iv = esp_tmp_iv(aead, tmp, extralen);
526 	req = esp_tmp_req(aead, iv);
527 	sg = esp_req_sg(aead, req);
528 
529 	if (esp->inplace)
530 		dsg = sg;
531 	else
532 		dsg = &sg[esp->nfrags];
533 
534 	esph = esp_output_set_extra(skb, x, esp->esph, extra);
535 	esp->esph = esph;
536 
537 	sg_init_table(sg, esp->nfrags);
538 	err = skb_to_sgvec(skb, sg,
539 		           (unsigned char *)esph - skb->data,
540 		           assoclen + ivlen + esp->clen + alen);
541 	if (unlikely(err < 0))
542 		goto error_free;
543 
544 	if (!esp->inplace) {
545 		int allocsize;
546 		struct page_frag *pfrag = &x->xfrag;
547 
548 		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
549 
550 		spin_lock_bh(&x->lock);
551 		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
552 			spin_unlock_bh(&x->lock);
553 			goto error_free;
554 		}
555 
556 		skb_shinfo(skb)->nr_frags = 1;
557 
558 		page = pfrag->page;
559 		get_page(page);
560 		/* replace page frags in skb with new page */
561 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
562 		pfrag->offset = pfrag->offset + allocsize;
563 		spin_unlock_bh(&x->lock);
564 
565 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
566 		err = skb_to_sgvec(skb, dsg,
567 			           (unsigned char *)esph - skb->data,
568 			           assoclen + ivlen + esp->clen + alen);
569 		if (unlikely(err < 0))
570 			goto error_free;
571 	}
572 
573 	if ((x->props.flags & XFRM_STATE_ESN))
574 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
575 	else
576 		aead_request_set_callback(req, 0, esp_output_done, skb);
577 
578 	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
579 	aead_request_set_ad(req, assoclen);
580 
581 	memset(iv, 0, ivlen);
582 	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
583 	       min(ivlen, 8));
584 
585 	ESP_SKB_CB(skb)->tmp = tmp;
586 	err = crypto_aead_encrypt(req);
587 
588 	switch (err) {
589 	case -EINPROGRESS:
590 		goto error;
591 
592 	case -ENOSPC:
593 		err = NET_XMIT_DROP;
594 		break;
595 
596 	case 0:
597 		if ((x->props.flags & XFRM_STATE_ESN))
598 			esp_output_restore_header(skb);
599 	}
600 
601 	if (sg != dsg)
602 		esp_ssg_unref(x, tmp, skb);
603 
604 	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
605 		err = esp_output_tail_tcp(x, skb);
606 
607 error_free:
608 	kfree(tmp);
609 error:
610 	return err;
611 }
612 EXPORT_SYMBOL_GPL(esp_output_tail);
613 
614 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
615 {
616 	int alen;
617 	int blksize;
618 	struct ip_esp_hdr *esph;
619 	struct crypto_aead *aead;
620 	struct esp_info esp;
621 
622 	esp.inplace = true;
623 
624 	esp.proto = *skb_mac_header(skb);
625 	*skb_mac_header(skb) = IPPROTO_ESP;
626 
627 	/* skb is pure payload to encrypt */
628 
629 	aead = x->data;
630 	alen = crypto_aead_authsize(aead);
631 
632 	esp.tfclen = 0;
633 	if (x->tfcpad) {
634 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
635 		u32 padto;
636 
637 		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
638 		if (skb->len < padto)
639 			esp.tfclen = padto - skb->len;
640 	}
641 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
642 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
643 	esp.plen = esp.clen - skb->len - esp.tfclen;
644 	esp.tailen = esp.tfclen + esp.plen + alen;
645 
646 	esp.esph = ip_esp_hdr(skb);
647 
648 	esp.nfrags = esp_output_head(x, skb, &esp);
649 	if (esp.nfrags < 0)
650 		return esp.nfrags;
651 
652 	esph = esp.esph;
653 	esph->spi = x->id.spi;
654 
655 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
656 	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
657 				 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
658 
659 	skb_push(skb, -skb_network_offset(skb));
660 
661 	return esp_output_tail(x, skb, &esp);
662 }
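/*
 * Trailer math used in esp_output() above (sketch):
 *
 *   ciphertext: [ payload | TFC pad (tfclen) | pad (plen - 2) | pad len | next hdr ]
 *   appended after encryption: [ ICV (alen) ]
 *
 *   clen   = ALIGN(payload_len + 2 + tfclen, blksize)
 *   plen   = clen - payload_len - tfclen
 *   tailen = tfclen + plen + alen  (total growth requested from esp_output_head())
 */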
663 
664 static inline int esp_remove_trailer(struct sk_buff *skb)
665 {
666 	struct xfrm_state *x = xfrm_input_state(skb);
667 	struct crypto_aead *aead = x->data;
668 	int alen, hlen, elen;
669 	int padlen, trimlen;
670 	__wsum csumdiff;
671 	u8 nexthdr[2];
672 	int ret;
673 
674 	alen = crypto_aead_authsize(aead);
675 	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
676 	elen = skb->len - hlen;
677 
678 	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
679 		BUG();
680 
681 	ret = -EINVAL;
682 	padlen = nexthdr[0];
683 	if (padlen + 2 + alen >= elen) {
684 		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
685 				    padlen + 2, elen - alen);
686 		goto out;
687 	}
688 
689 	trimlen = alen + padlen + 2;
690 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
691 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
692 		skb->csum = csum_block_sub(skb->csum, csumdiff,
693 					   skb->len - trimlen);
694 	}
695 	ret = pskb_trim(skb, skb->len - trimlen);
696 	if (unlikely(ret))
697 		return ret;
698 
699 	ret = nexthdr[1];
700 
701 out:
702 	return ret;
703 }
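/*
 * Note on the trailer parsing above: nexthdr[0] is the ESP pad length and
 * nexthdr[1] the next header value, so trimlen = ICV + padding + 2 is cut
 * from the skb and the next header value is returned on success.
 */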
704 
705 int esp_input_done2(struct sk_buff *skb, int err)
706 {
707 	const struct iphdr *iph;
708 	struct xfrm_state *x = xfrm_input_state(skb);
709 	struct xfrm_offload *xo = xfrm_offload(skb);
710 	struct crypto_aead *aead = x->data;
711 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
712 	int ihl;
713 
714 	if (!xo || !(xo->flags & CRYPTO_DONE))
715 		kfree(ESP_SKB_CB(skb)->tmp);
716 
717 	if (unlikely(err))
718 		goto out;
719 
720 	err = esp_remove_trailer(skb);
721 	if (unlikely(err < 0))
722 		goto out;
723 
724 	iph = ip_hdr(skb);
725 	ihl = iph->ihl * 4;
726 
727 	if (x->encap) {
728 		struct xfrm_encap_tmpl *encap = x->encap;
729 		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
730 		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
731 		__be16 source;
732 
733 		switch (x->encap->encap_type) {
734 		case TCP_ENCAP_ESPINTCP:
735 			source = th->source;
736 			break;
737 		case UDP_ENCAP_ESPINUDP:
738 		case UDP_ENCAP_ESPINUDP_NON_IKE:
739 			source = uh->source;
740 			break;
741 		default:
742 			WARN_ON_ONCE(1);
743 			err = -EINVAL;
744 			goto out;
745 		}
746 
747 		/*
748 		 * 1) if the NAT-T peer's IP or port changed then
749 		 *    advertise the change to the keying daemon.
750 		 *    This is an inbound SA, so just compare
751 		 *    SRC ports.
752 		 */
753 		if (iph->saddr != x->props.saddr.a4 ||
754 		    source != encap->encap_sport) {
755 			xfrm_address_t ipaddr;
756 
757 			ipaddr.a4 = iph->saddr;
758 			km_new_mapping(x, &ipaddr, source);
759 
760 			/* XXX: perhaps add an extra
761 			 * policy check here, to see
762 			 * if we should allow or
763 			 * reject a packet from a
764 			 * different source
765 			 * address/port.
766 			 */
767 		}
768 
769 		/*
770 		 * 2) ignore UDP/TCP checksums in case
771 		 *    of NAT-T in Transport Mode, or
772 		 *    perform other post-processing fixes
773 		 *    as per draft-ietf-ipsec-udp-encaps-06,
774 		 *    section 3.1.2
775 		 */
776 		if (x->props.mode == XFRM_MODE_TRANSPORT)
777 			skb->ip_summed = CHECKSUM_UNNECESSARY;
778 	}
779 
780 	skb_pull_rcsum(skb, hlen);
781 	if (x->props.mode == XFRM_MODE_TUNNEL)
782 		skb_reset_transport_header(skb);
783 	else
784 		skb_set_transport_header(skb, -ihl);
785 
786 	/* RFC4303: Drop dummy packets without any error */
787 	if (err == IPPROTO_NONE)
788 		err = -EINVAL;
789 
790 out:
791 	return err;
792 }
793 EXPORT_SYMBOL_GPL(esp_input_done2);
794 
795 static void esp_input_done(void *data, int err)
796 {
797 	struct sk_buff *skb = data;
798 
799 	xfrm_input_resume(skb, esp_input_done2(skb, err));
800 }
801 
802 static void esp_input_restore_header(struct sk_buff *skb)
803 {
804 	esp_restore_header(skb, 0);
805 	__skb_pull(skb, 4);
806 }
807 
808 static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
809 {
810 	struct xfrm_state *x = xfrm_input_state(skb);
811 	struct ip_esp_hdr *esph;
812 
813 	/* For ESN we move the header forward by 4 bytes to
814 	 * accommodate the high bits.  We will move it back after
815 	 * decryption.
816 	 */
817 	if ((x->props.flags & XFRM_STATE_ESN)) {
818 		esph = skb_push(skb, 4);
819 		*seqhi = esph->spi;
820 		esph->spi = esph->seq_no;
821 		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
822 	}
823 }
824 
825 static void esp_input_done_esn(void *data, int err)
826 {
827 	struct sk_buff *skb = data;
828 
829 	esp_input_restore_header(skb);
830 	esp_input_done(data, err);
831 }
832 
833 /*
834  * Note: detecting truncated vs. non-truncated authentication data is very
835  * expensive, so we only support truncated data, which is the recommended
836  * and common case.
837  */
838 static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
839 {
840 	struct crypto_aead *aead = x->data;
841 	struct aead_request *req;
842 	struct sk_buff *trailer;
843 	int ivlen = crypto_aead_ivsize(aead);
844 	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
845 	int nfrags;
846 	int assoclen;
847 	int seqhilen;
848 	__be32 *seqhi;
849 	void *tmp;
850 	u8 *iv;
851 	struct scatterlist *sg;
852 	int err = -EINVAL;
853 
854 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
855 		goto out;
856 
857 	if (elen <= 0)
858 		goto out;
859 
860 	assoclen = sizeof(struct ip_esp_hdr);
861 	seqhilen = 0;
862 
863 	if (x->props.flags & XFRM_STATE_ESN) {
864 		seqhilen += sizeof(__be32);
865 		assoclen += seqhilen;
866 	}
867 
868 	if (!skb_cloned(skb)) {
869 		if (!skb_is_nonlinear(skb)) {
870 			nfrags = 1;
871 
872 			goto skip_cow;
873 		} else if (!skb_has_frag_list(skb)) {
874 			nfrags = skb_shinfo(skb)->nr_frags;
875 			nfrags++;
876 
877 			goto skip_cow;
878 		}
879 	}
880 
881 	err = skb_cow_data(skb, 0, &trailer);
882 	if (err < 0)
883 		goto out;
884 
885 	nfrags = err;
886 
887 skip_cow:
888 	err = -ENOMEM;
889 	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
890 	if (!tmp)
891 		goto out;
892 
893 	ESP_SKB_CB(skb)->tmp = tmp;
894 	seqhi = esp_tmp_extra(tmp);
895 	iv = esp_tmp_iv(aead, tmp, seqhilen);
896 	req = esp_tmp_req(aead, iv);
897 	sg = esp_req_sg(aead, req);
898 
899 	esp_input_set_header(skb, seqhi);
900 
901 	sg_init_table(sg, nfrags);
902 	err = skb_to_sgvec(skb, sg, 0, skb->len);
903 	if (unlikely(err < 0)) {
904 		kfree(tmp);
905 		goto out;
906 	}
907 
908 	skb->ip_summed = CHECKSUM_NONE;
909 
910 	if ((x->props.flags & XFRM_STATE_ESN))
911 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
912 	else
913 		aead_request_set_callback(req, 0, esp_input_done, skb);
914 
915 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
916 	aead_request_set_ad(req, assoclen);
917 
918 	err = crypto_aead_decrypt(req);
919 	if (err == -EINPROGRESS)
920 		goto out;
921 
922 	if ((x->props.flags & XFRM_STATE_ESN))
923 		esp_input_restore_header(skb);
924 
925 	err = esp_input_done2(skb, err);
926 
927 out:
928 	return err;
929 }
930 
931 static int esp4_err(struct sk_buff *skb, u32 info)
932 {
933 	struct net *net = dev_net(skb->dev);
934 	const struct iphdr *iph = (const struct iphdr *)skb->data;
935 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
936 	struct xfrm_state *x;
937 
938 	switch (icmp_hdr(skb)->type) {
939 	case ICMP_DEST_UNREACH:
940 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
941 			return 0;
942 		break;
943 	case ICMP_REDIRECT:
944 		break;
945 	default:
946 		return 0;
947 	}
948 
949 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
950 			      esph->spi, IPPROTO_ESP, AF_INET);
951 	if (!x)
952 		return 0;
953 
954 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
955 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
956 	else
957 		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
958 	xfrm_state_put(x);
959 
960 	return 0;
961 }
962 
963 static void esp_destroy(struct xfrm_state *x)
964 {
965 	struct crypto_aead *aead = x->data;
966 
967 	if (!aead)
968 		return;
969 
970 	crypto_free_aead(aead);
971 }
972 
973 static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
974 {
975 	char aead_name[CRYPTO_MAX_ALG_NAME];
976 	struct crypto_aead *aead;
977 	int err;
978 
979 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
980 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
981 		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
982 		return -ENAMETOOLONG;
983 	}
984 
985 	aead = crypto_alloc_aead(aead_name, 0, 0);
986 	err = PTR_ERR(aead);
987 	if (IS_ERR(aead))
988 		goto error;
989 
990 	x->data = aead;
991 
992 	err = crypto_aead_setkey(aead, x->aead->alg_key,
993 				 (x->aead->alg_key_len + 7) / 8);
994 	if (err)
995 		goto error;
996 
997 	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
998 	if (err)
999 		goto error;
1000 
1001 	return 0;
1002 
1003 error:
1004 	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1005 	return err;
1006 }
1007 
1008 static int esp_init_authenc(struct xfrm_state *x,
1009 			    struct netlink_ext_ack *extack)
1010 {
1011 	struct crypto_aead *aead;
1012 	struct crypto_authenc_key_param *param;
1013 	struct rtattr *rta;
1014 	char *key;
1015 	char *p;
1016 	char authenc_name[CRYPTO_MAX_ALG_NAME];
1017 	unsigned int keylen;
1018 	int err;
1019 
1020 	err = -ENAMETOOLONG;
1021 
1022 	if ((x->props.flags & XFRM_STATE_ESN)) {
1023 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1024 			     "%s%sauthencesn(%s,%s)%s",
1025 			     x->geniv ?: "", x->geniv ? "(" : "",
1026 			     x->aalg ? x->aalg->alg_name : "digest_null",
1027 			     x->ealg->alg_name,
1028 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1029 			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1030 			goto error;
1031 		}
1032 	} else {
1033 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1034 			     "%s%sauthenc(%s,%s)%s",
1035 			     x->geniv ?: "", x->geniv ? "(" : "",
1036 			     x->aalg ? x->aalg->alg_name : "digest_null",
1037 			     x->ealg->alg_name,
1038 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1039 			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1040 			goto error;
1041 		}
1042 	}
1043 
1044 	aead = crypto_alloc_aead(authenc_name, 0, 0);
1045 	err = PTR_ERR(aead);
1046 	if (IS_ERR(aead)) {
1047 		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1048 		goto error;
1049 	}
1050 
1051 	x->data = aead;
1052 
1053 	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
1054 		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
1055 	err = -ENOMEM;
1056 	key = kmalloc(keylen, GFP_KERNEL);
1057 	if (!key)
1058 		goto error;
1059 
1060 	p = key;
1061 	rta = (void *)p;
1062 	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1063 	rta->rta_len = RTA_LENGTH(sizeof(*param));
1064 	param = RTA_DATA(rta);
1065 	p += RTA_SPACE(sizeof(*param));
1066 
1067 	if (x->aalg) {
1068 		struct xfrm_algo_desc *aalg_desc;
1069 
1070 		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
1071 		p += (x->aalg->alg_key_len + 7) / 8;
1072 
1073 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
1074 		BUG_ON(!aalg_desc);
1075 
1076 		err = -EINVAL;
1077 		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
1078 		    crypto_aead_authsize(aead)) {
1079 			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1080 			goto free_key;
1081 		}
1082 
1083 		err = crypto_aead_setauthsize(
1084 			aead, x->aalg->alg_trunc_len / 8);
1085 		if (err) {
1086 			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1087 			goto free_key;
1088 		}
1089 	}
1090 
1091 	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
1092 	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
1093 
1094 	err = crypto_aead_setkey(aead, key, keylen);
1095 
1096 free_key:
1097 	kfree_sensitive(key);
1098 
1099 error:
1100 	return err;
1101 }
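/*
 * Key blob passed to crypto_aead_setkey() above (sketch), in the layout
 * the authenc()/authencesn() templates expect:
 *
 *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } | auth key | enc key ]
 */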
1102 
1103 static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
1104 {
1105 	struct crypto_aead *aead;
1106 	u32 align;
1107 	int err;
1108 
1109 	x->data = NULL;
1110 
1111 	if (x->aead) {
1112 		err = esp_init_aead(x, extack);
1113 	} else if (x->ealg) {
1114 		err = esp_init_authenc(x, extack);
1115 	} else {
1116 		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
1117 		err = -EINVAL;
1118 	}
1119 
1120 	if (err)
1121 		goto error;
1122 
1123 	aead = x->data;
1124 
1125 	x->props.header_len = sizeof(struct ip_esp_hdr) +
1126 			      crypto_aead_ivsize(aead);
1127 	if (x->props.mode == XFRM_MODE_TUNNEL)
1128 		x->props.header_len += sizeof(struct iphdr);
1129 	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
1130 		x->props.header_len += IPV4_BEET_PHMAXLEN;
1131 	if (x->encap) {
1132 		struct xfrm_encap_tmpl *encap = x->encap;
1133 
1134 		switch (encap->encap_type) {
1135 		default:
1136 			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
1137 			err = -EINVAL;
1138 			goto error;
1139 		case UDP_ENCAP_ESPINUDP:
1140 			x->props.header_len += sizeof(struct udphdr);
1141 			break;
1142 		case UDP_ENCAP_ESPINUDP_NON_IKE:
1143 			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
1144 			break;
1145 #ifdef CONFIG_INET_ESPINTCP
1146 		case TCP_ENCAP_ESPINTCP:
1147 			/* only the length field, TCP encap is done by
1148 			 * the socket
1149 			 */
1150 			x->props.header_len += 2;
1151 			break;
1152 #endif
1153 		}
1154 	}
1155 
1156 	align = ALIGN(crypto_aead_blocksize(aead), 4);
1157 	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
1158 
1159 error:
1160 	return err;
1161 }
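/*
 * Note on the trailer budget above: ALIGN(blocksize, 4) + 1 covers the
 * worst-case ESP padding plus the pad-length and next-header bytes, and
 * crypto_aead_authsize() accounts for the ICV.
 */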
1162 
1163 static int esp4_rcv_cb(struct sk_buff *skb, int err)
1164 {
1165 	return 0;
1166 }
1167 
1168 static const struct xfrm_type esp_type =
1169 {
1170 	.owner		= THIS_MODULE,
1171 	.proto	     	= IPPROTO_ESP,
1172 	.flags		= XFRM_TYPE_REPLAY_PROT,
1173 	.init_state	= esp_init_state,
1174 	.destructor	= esp_destroy,
1175 	.input		= esp_input,
1176 	.output		= esp_output,
1177 };
1178 
1179 static struct xfrm4_protocol esp4_protocol = {
1180 	.handler	=	xfrm4_rcv,
1181 	.input_handler	=	xfrm_input,
1182 	.cb_handler	=	esp4_rcv_cb,
1183 	.err_handler	=	esp4_err,
1184 	.priority	=	0,
1185 };
1186 
1187 static int __init esp4_init(void)
1188 {
1189 	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
1190 		pr_info("%s: can't add xfrm type\n", __func__);
1191 		return -EAGAIN;
1192 	}
1193 	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
1194 		pr_info("%s: can't add protocol\n", __func__);
1195 		xfrm_unregister_type(&esp_type, AF_INET);
1196 		return -EAGAIN;
1197 	}
1198 	return 0;
1199 }
1200 
1201 static void __exit esp4_fini(void)
1202 {
1203 	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
1204 		pr_info("%s: can't remove protocol\n", __func__);
1205 	xfrm_unregister_type(&esp_type, AF_INET);
1206 }
1207 
1208 module_init(esp4_init);
1209 module_exit(esp4_fini);
1210 MODULE_LICENSE("GPL");
1211 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
1212