#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

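/*
 * Per-skb state for asynchronous hashing: the generic xfrm control block
 * plus a pointer to the temporary buffer allocated by ah_alloc_tmp(),
 * which must stay alive until the ahash completion callback runs.
 */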
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

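/*
 * Allocate one scratch buffer holding, in order: @size bytes of caller
 * data, the (alignment-padded) ICV, the ahash_request, and @nfrags
 * scatterlist entries. The ah_tmp_*() helpers below recover pointers
 * into this layout.
 */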
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

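/* Accessors into the scratch buffer laid out by ah_alloc_tmp(). */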
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph + 1);
	int l = iph->ihl * 4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr + optlen - 4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

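/*
 * Completion callback for asynchronous digest on the output path: copy
 * the computed ICV into the AH header, restore the mutable IP header
 * fields saved in the scratch buffer, then hand the skb back to the
 * xfrm core.
 */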
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph + 1, iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

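/*
 * AH output: save the mutable IPv4 header fields (and any options) into
 * a scratch buffer, zero them in the packet, build the AH header, and
 * compute the ICV over the whole packet. The digest may complete
 * asynchronously, in which case ah_output_done() finishes the job.
 */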
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
	if (!iph)
		goto out;
	seqhi = (__be32 *)((char *)iph + ihl);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph + 1, top_iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph + 1, iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

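/*
 * Completion callback for asynchronous digest on the input path: compare
 * the computed ICV against the received one, and on success strip the AH
 * header and restore the saved IP header before resuming xfrm input.
 */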
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

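/*
 * AH input: validate the AH header length, save the IP header and the
 * received ICV into a scratch buffer, zero the mutable fields, and
 * recompute the ICV over the packet. Verification either completes
 * inline or is finished asynchronously by ah_input_done().
 */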
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	seqhi = (__be32 *)((char *)work_iph + ihl);
	auth_data = ah_tmp_auth(seqhi, seqhi_len);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

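/*
 * ICMP error handler: on a fragmentation-needed or redirect message that
 * matches a known AH SA, update the cached path MTU or reroute.
 */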
static int ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
	xfrm_state_put(x);

	return 0;
}

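/*
 * Set up AH state: allocate the hash transform named by the SA, set its
 * key, validate the ICV sizes against the xfrm algorithm description,
 * and compute the header length this SA adds to each packet.
 */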
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len / 8;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

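/* Release the hash transform and per-SA data allocated by ah_init_state(). */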
static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

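/* No post-input processing is needed for AH; always report success. */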
static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

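/* xfrm type and protocol glue: hooks AH into the IPv4 xfrm machinery. */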
static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static struct xfrm4_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah4_rcv_cb,
	.err_handler	= ah4_err,
	.priority	= 0,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);