// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <linux/skbuff_ref.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

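/* Drop the page references that were left with the source scatterlist
 * when the skb's frags were replaced for out-of-place encryption.  The
 * first sg entry maps skb->data and holds no page reference, so it is
 * skipped.
 */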
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);
}

#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

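/* Find the TCP socket carrying this ESP-in-TCP SA.  A cached socket in
 * x->encap_sk is reused while it stays ESTABLISHED; otherwise the stale
 * cache entry is released via RCU and the socket is looked up by the
 * encapsulation 4-tuple, checked for the espintcp ULP and re-cached.
 * Must be called under rcu_read_lock().
 */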
static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

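/* Hand a fully built ESP-in-TCP packet to its encapsulating TCP socket,
 * queueing it when the socket is owned by user context.  The skb is
 * dropped if no usable socket is found.
 */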
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

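/* Transmission of an ESP-in-TCP packet is deferred through the xfrm
 * transit queue and completed later by esp_output_tcp_encap_cb().
 */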
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing. It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
#endif

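/* AEAD encryption completion callback.  Releases the temporary buffer
 * and any out-of-place page references, then resumes either the device
 * offload path (XFRM_DEV_RESUME) or normal xfrm output, diverting
 * ESP-in-TCP packets to their encapsulating socket.
 */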
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}

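/* Prepend a UDP header for ESP-in-UDP encapsulation and return the new
 * location of the ESP header.  The UDP checksum is left at zero, as is
 * customary for UDP-encapsulated ESP.
 */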
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	unsigned int len;
	struct xfrm_offload *xo = xfrm_offload(skb);

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	/* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
	 * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
	 * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
	 */
	if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
		*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET_ESPINTCP
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

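/* Apply the configured NAT traversal encapsulation (UDP or TCP) and
 * update esp->esph to the ESP header's new position.  The encap fields
 * are sampled under x->lock to get a consistent snapshot, since the
 * peer's port mapping can change at run time.
 */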
static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			    struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

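/* Make room for the ESP trailer (TFC padding, padding, pad length, next
 * header and ICV) and fill it in.  Cheapest strategy first: use the
 * tailroom of an unshared skb, or append a fresh page fragment from the
 * per-state allocator (switching to out-of-place encryption), or fall
 * back to skb_cow_data().  Returns the number of scatterlist entries
 * needed to map the resulting skb, or a negative error.
 */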
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb_len_add(skb, tailen);
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

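/* Encrypt the packet.  The source scatterlist maps the ESP header, IV,
 * payload and ICV space; for out-of-place output the skb's page frags
 * are first swapped for a fresh page so the destination list points at
 * private, writable memory.  Returns -EINPROGRESS when the AEAD request
 * completes asynchronously via esp_output_done().
 */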
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

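/* xfrm_type .output handler: compute TFC and block padding (the
 * ciphertext length is aligned to the cipher block size, rounded up to
 * a multiple of four), stamp the SPI and the low sequence number bits,
 * then run esp_output_head() and esp_output_tail().
 */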
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

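/* Strip the ESP trailer after decryption: validate the pad length
 * against the payload size, trim padding and ICV (fixing up a
 * CHECKSUM_COMPLETE value), and return the next header protocol or a
 * negative error.
 */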
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}

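/* Post-decryption processing shared by the synchronous, async and
 * offload input paths: strip the trailer, report NAT-T peer address or
 * port changes to the keying daemon, and pull the ESP header before
 * returning the inner protocol number to the xfrm input layer.
 */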
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

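/* ICMP error handler: on fragmentation-needed or redirect messages for
 * an ESP packet, look up the affected SA and update the path MTU or the
 * route accordingly.
 */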
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

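/* Set up the transform for a state configured with a real AEAD
 * algorithm (e.g. "rfc4106(gcm(aes))"), programming its key and ICV
 * length.
 */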
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}

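/* Wrap a legacy cipher+authenticator configuration in an authenc() (or
 * authencesn() for ESN) AEAD transform.  The combined key is passed to
 * the transform as an rtattr blob laid out as:
 *
 *	struct rtattr (CRYPTO_AUTHENC_KEYA_PARAM)
 *	struct crypto_authenc_key_param (big-endian encryption key length)
 *	authentication key, if any
 *	encryption key
 */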
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree_sensitive(key);

error:
	return err;
}

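/* xfrm_type .init_state handler: instantiate the AEAD transform and
 * compute the SA's worst-case header and trailer overhead, including
 * tunnel, BEET and NAT traversal encapsulation headers.
 */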
static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_DESCRIPTION("IPv4 ESP transformation library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);