// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present; they are followed by the IV, the
 * request itself, and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
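/*
 * Resulting layout, as carved back out by the esp_tmp_*() helpers below
 * (a rough sketch; the exact padding depends on the algorithm's alignmask):
 *
 *	[seqhi/extra (ESN only)][IV][aead_request + req ctx][SG entries]
 */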
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

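/* Find the TCP socket that carries this ESP-in-TCP state, preferring the
 * socket cached in x->encap_sk.  A stale cached socket is released via RCU
 * (esp_free_tcp_sk) and replaced with a fresh lookup in the established-TCP
 * hash table.  Callers hold rcu_read_lock().
 */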
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing. It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
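	/* Sketch of the shift, with the ESP header originally at offset X
	 * (illustration only):
	 *
	 *	before: [X-4: preceding bytes][X: spi][X+4: seq_no (low)]
	 *	after:  [X-4: spi][X: seqhi][X+4: seq_no (low)]
	 *
	 * spi, seqhi and the low sequence number then form the contiguous
	 * AAD; the overwritten bytes are stashed in extra->seqhi and put
	 * back by esp_output_restore_header() once encryption completes.
	 */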
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

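/* Write the UDP encapsulation header in front of the ESP header.
 * UDP_ENCAP_ESPINUDP places ESP directly after the UDP header (RFC 3948);
 * UDP_ENCAP_ESPINUDP_NON_IKE additionally inserts the 8-byte all-zero
 * non-IKE marker from the older encapsulation drafts.
 */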
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

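/* Attach the ESP trailer (TFC padding, padding, pad length, next header,
 * plus room for the ICV) and return the number of fragments the resulting
 * skb spans.  Three strategies, in order of preference: write into the
 * skb's existing tailroom, append a page fragment from x->xfrag (forcing
 * out-of-place crypto), or fall back to copying via skb_cow_data().
 */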
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

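/* Build the source (and, when operating out-of-place, destination)
 * scatterlists, set up the AEAD request and encrypt.  On -EINPROGRESS the
 * crypto layer has taken ownership of the skb and completion is handled
 * asynchronously by esp_output_done()/esp_output_done_esn().
 */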
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

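/* Strip the ESP trailer after decryption and return the next-header value.
 * Per RFC 4303 the trailer is laid out as:
 *
 *	[padding (padlen bytes)][pad length (1)][next header (1)][ICV (alen)]
 */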
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

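/* Decapsulate one ESP packet: map the skb into a scatterlist, run the AEAD
 * decryption in place and hand the result to esp6_input_done2().
 * Asynchronous completions are resumed via esp_input_done() or
 * esp_input_done_esn().
 */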
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

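/* ICMPv6 error handler: for "packet too big" and redirect messages that
 * match one of our ESP states, update the PMTU or the route accordingly;
 * all other error types are ignored.
 */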
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

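/* Build the authenc/authencesn AEAD key blob expected by the crypto layer:
 *
 *	[rtattr CRYPTO_AUTHENC_KEYA_PARAM (enckeylen)][auth key][enc key]
 */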
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);