// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>
#include <linux/skbuff_ref.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

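/* When the packet was not encrypted in place (req->src != req->dst),
 * the source scatterlist still holds references on the original skb
 * page frags. Drop them here; the first entry is skipped because it
 * points into skb->data rather than a page frag.
 */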
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

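/* Find the TCP socket that carries ESP-in-TCP for this state. The
 * result is cached in x->encap_sk and reused while the connection
 * stays established; a stale cache entry is released via RCU before a
 * fresh hash-table lookup against the encap template's ports. Called
 * under rcu_read_lock(); returns an ERR_PTR() on failure.
 */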
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

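/* Queue the encrypted packet for transmission over the espintcp
 * socket. xfrm_trans_queue_net() defers the actual push so that it
 * does not run directly from this (possibly atomic crypto callback)
 * context.
 */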
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

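/* Completion callback for asynchronous encryption. Releases the
 * temporary buffer, fixes up the outer UDP checksum if needed and
 * resumes the output path (hardware offload, espintcp or plain
 * xfrm_output_resume()).
 */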
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}

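/* Build the outer UDP header for ESP-in-UDP encapsulation in front of
 * the ESP header. The checksum is left at zero here; it is filled in
 * by esp_output_encap_csum() once the payload is encrypted, since
 * IPv6 requires a valid UDP checksum.
 */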
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
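/* Reserve the 2-byte length prefix that precedes ESP messages on a
 * TCP encapsulation stream (RFC 8229) and check that an established
 * espintcp socket exists for this state.
 */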
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

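/* First half of ESP output: add the encapsulation header if needed and
 * append the ESP trailer (TFC padding, pad bytes, pad length and next
 * header). The trailer is placed in the skb's tailroom when possible,
 * in a freshly allocated page frag otherwise, and the packet is only
 * copied (COWed) as a last resort. Returns the number of scatterlist
 * segments needed for the packet, or a negative error.
 */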
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

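/* Second half of ESP output: set up the AEAD request over the packet,
 * write the ESN high bits and IV, and encrypt. On -EINPROGRESS the
 * crypto layer completes asynchronously via esp_output_done(); the
 * temporary buffer is then freed by the callback instead of here.
 */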
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

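/* xfrm_type output hook: compute the ESP trailer layout (TFC padding,
 * pad length, ICV), fill in SPI and sequence number and run the two
 * output halves above.
 */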
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

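/* Strip the ESP trailer after decryption: validate the pad length,
 * trim padding and ICV off the skb and return the next-header value
 * from the trailer (or a negative error).
 */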
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}

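/* Finish ESP input after decryption: remove the trailer, handle NAT-T
 * encapsulation (peer address/port changes, checksum fixups) and pull
 * the ESP header so the inner packet starts at the transport header.
 * Returns the next-header protocol or a negative error.
 */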
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra policy check here, to see
			 * if we should allow or reject a packet from a
			 * different source address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}

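/* xfrm_type input hook: map the packet into a scatterlist, set up the
 * ESN-adjusted header if needed and decrypt. Asynchronous completion
 * continues in esp_input_done()/esp_input_done_esn().
 */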
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}

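/* Wrap a separate cipher and authentication algorithm in an
 * authenc(...) (or authencesn(...) for ESN) AEAD instance. The key
 * material is passed as an rtattr-framed blob: a param header holding
 * the cipher key length, then the auth key, then the cipher key.
 */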
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

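/* xfrm_type init hook: instantiate the AEAD transform and derive the
 * per-mode header and trailer lengths used for MTU and headroom
 * calculations.
 */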
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);