// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
        struct tasklet_struct tasklet;
        struct sk_buff_head queue;
};

struct xfrm_trans_cb {
        union {
                struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm h6;
#endif
        } header;
        int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
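
/*
 * Note: struct xfrm_trans_cb is overlaid on skb->cb by the macro above, so
 * it shares the 48 bytes of control-block space with the IPv4/IPv6 parm
 * structures it embeds.  A build-time sanity check (an illustrative sketch
 * only, not part of this file) would look like
 *
 *      BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) >
 *                   sizeof(((struct sk_buff *)0)->cb));
 *
 * placed in a function such as xfrm_input_init().
 */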

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
                err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);
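
/*
 * Registration sketch, for illustration only (the example_* names below are
 * assumed, not taken from this file): an address-family module provides a
 * struct xfrm_input_afinfo keyed by its family, plus the callback invoked
 * from xfrm_rcv_cb() when input processing finishes or fails.
 *
 *      static const struct xfrm_input_afinfo example_input_afinfo = {
 *              .family         = AF_INET,
 *              .callback       = example_rcv_cb,
 *      };
 *
 *      err = xfrm_input_register_afinfo(&example_input_afinfo);
 *
 * Only one handler per family can be registered at a time; a second
 * registration fails with -EEXIST.
 */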

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
        }
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        synchronize_rcu();
        return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

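/*
 * Look up the input handler for @family.  On success this returns with
 * rcu_read_lock() held; the caller (xfrm_rcv_cb() below) must drop it with
 * rcu_read_unlock() once it is done with the returned pointer.  On failure
 * the read lock has already been released and NULL is returned.
 */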
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
        const struct xfrm_input_afinfo *afinfo;

        if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return NULL;

        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_input_afinfo[family]);
        if (unlikely(!afinfo))
                rcu_read_unlock();
        return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
                       int err)
{
        int ret;
        const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

        if (!afinfo)
                return -EAFNOSUPPORT;

        ret = afinfo->callback(skb, protocol, err);
        rcu_read_unlock();

        return ret;
}

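/*
 * Make sure @skb carries a SEC_PATH extension and return it.  If the skb
 * already has one (possibly shared), skb_ext_add() re-uses or COWs it and
 * the existing contents are kept; only a freshly allocated secpath is
 * zeroed here.  Callers check for NULL and then append states, as
 * xfrm_input() does below with sp->xvec[sp->len++] = x.
 */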
struct sec_path *secpath_set(struct sk_buff *skb)
{
        struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

        sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
        if (!sp)
                return NULL;

        if (tmp) /* reused existing one (was COW'd if needed) */
                return sp;

        /* allocated new secpath */
        memset(sp->ovec, 0, sizeof(sp->ovec));
        sp->olen = 0;
        sp->len = 0;

        return sp;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header */

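/*
 * For reference (a sketch of the on-wire layouts this helper relies on):
 * both the AH and ESP headers carry the SPI and sequence number as 32-bit
 * big-endian fields, at the offsets given by struct ip_auth_hdr and
 * struct ip_esp_hdr respectively.  IPComp is special: its header only has a
 * 16-bit CPI, which is widened into the SPI here, and it carries no
 * sequence number, so *seq is set to 0.
 */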
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
        int offset, offset_seq;
        int hlen;

        switch (nexthdr) {
        case IPPROTO_AH:
                hlen = sizeof(struct ip_auth_hdr);
                offset = offsetof(struct ip_auth_hdr, spi);
                offset_seq = offsetof(struct ip_auth_hdr, seq_no);
                break;
        case IPPROTO_ESP:
                hlen = sizeof(struct ip_esp_hdr);
                offset = offsetof(struct ip_esp_hdr, spi);
                offset_seq = offsetof(struct ip_esp_hdr, seq_no);
                break;
        case IPPROTO_COMP:
                if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
                        return -EINVAL;
                *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
                *seq = 0;
                return 0;
        default:
                return 1;
        }

        if (!pskb_may_pull(skb, hlen))
                return -EINVAL;

        *spi = *(__be32 *)(skb_transport_header(skb) + offset);
        *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
        return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

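/*
 * BEET mode does not transmit the inner IP header, so on input it has to be
 * rebuilt here, with the addresses taken from the state's selector (x->sel).
 * If the optional BEET pseudo header (IPPROTO_BEETPH) is present, it
 * describes IP options that follow it: ph->hdrlen counts 8-byte units and
 * ph->padlen is padding inside the pseudo header, and optlen below is
 * derived from both.
 */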
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph;
        int optlen = 0;
        int err = -EINVAL;

        if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
                struct ip_beet_phdr *ph;
                int phlen;

                if (!pskb_may_pull(skb, sizeof(*ph)))
                        goto out;

                ph = (struct ip_beet_phdr *)skb->data;

                phlen = sizeof(*ph) + ph->padlen;
                optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
                if (optlen < 0 || optlen & 3 || optlen > 250)
                        goto out;

                XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

                if (!pskb_may_pull(skb, phlen))
                        goto out;
                __skb_pull(skb, phlen);
        }

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);

        xfrm4_beet_make_header(skb);

        iph = ip_hdr(skb);

        iph->ihl += optlen / 4;
        iph->tot_len = htons(skb->len);
        iph->daddr = x->sel.daddr.a4;
        iph->saddr = x->sel.saddr.a4;
        iph->check = 0;
        iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
        err = 0;
out:
        return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
        struct iphdr *inner_iph = ipip_hdr(skb);

        if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = -EINVAL;

        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
                goto out;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;

        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;

        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
                ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip_ecn_decapsulate(skb);

        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
        if (skb->mac_len)
                eth_hdr(skb)->h_proto = skb->protocol;

        err = 0;

out:
        return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
        struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

        if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = -EINVAL;

        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
                goto out;
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;

        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;

        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
                ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
                               ipipv6_hdr(skb));
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip6_ecn_decapsulate(skb);

        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
        if (skb->mac_len)
                eth_hdr(skb)->h_proto = skb->protocol;

        err = 0;

out:
        return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ipv6hdr *ip6h;
        int size = sizeof(struct ipv6hdr);
        int err;

        err = skb_cow_head(skb, size + skb->mac_len);
        if (err)
                goto out;

        __skb_push(skb, size);
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);

        xfrm6_beet_make_header(skb);

        ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
        ip6h->daddr = x->sel.daddr.in6;
        ip6h->saddr = x->sel.saddr.in6;
        err = 0;
out:
        return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
                             const struct xfrm_mode *inner_mode,
                             struct sk_buff *skb)
{
        switch (inner_mode->encap) {
        case XFRM_MODE_BEET:
                if (inner_mode->family == AF_INET)
                        return xfrm4_remove_beet_encap(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_remove_beet_encap(x, skb);
                break;
        case XFRM_MODE_TUNNEL:
                if (inner_mode->family == AF_INET)
                        return xfrm4_remove_tunnel_encap(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_remove_tunnel_encap(x, skb);
                break;
        }

        WARN_ON_ONCE(1);
        return -EOPNOTSUPP;
}

static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
        const struct xfrm_mode *inner_mode = &x->inner_mode;
        const struct xfrm_state_afinfo *afinfo;
        int err = -EAFNOSUPPORT;

        rcu_read_lock();
        afinfo = xfrm_state_afinfo_get_rcu(x->outer_mode.family);
        if (likely(afinfo))
                err = afinfo->extract_input(x, skb);
        rcu_read_unlock();

        if (err)
                return err;

        if (x->sel.family == AF_UNSPEC) {
                inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                if (!inner_mode)
                        return -EAFNOSUPPORT;
        }

        switch (inner_mode->family) {
        case AF_INET:
                skb->protocol = htons(ETH_P_IP);
                break;
        case AF_INET6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
        int ihl = skb->data - skb_transport_header(skb);

        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
                        skb_network_header(skb), ihl);
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
        skb_reset_transport_header(skb);
        return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int ihl = skb->data - skb_transport_header(skb);

        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
                        skb_network_header(skb), ihl);
                skb->network_header = skb->transport_header;
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
        skb_reset_transport_header(skb);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

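/*
 * Dispatch on the inner mode: BEET and TUNNEL need the full inner header
 * rebuild done by xfrm_prepare_input(), while TRANSPORT only slides the
 * original IP header back into place via the per-family transport_input
 * helpers above.  Anything else (including the unimplemented
 * route-optimization mode) is rejected.
 */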
static int xfrm_inner_mode_input(struct xfrm_state *x,
                                 const struct xfrm_mode *inner_mode,
                                 struct sk_buff *skb)
{
        switch (inner_mode->encap) {
        case XFRM_MODE_BEET:
        case XFRM_MODE_TUNNEL:
                return xfrm_prepare_input(x, skb);
        case XFRM_MODE_TRANSPORT:
                if (inner_mode->family == AF_INET)
                        return xfrm4_transport_input(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_transport_input(x, skb);
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
                WARN_ON_ONCE(1);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return -EOPNOTSUPP;
}

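/*
 * Main IPsec receive path.  The encap_type argument encodes how we got
 * here: a value >= 0 is a normal (possibly UDP-encapsulated) receive with
 * the state still to be looked up, -1 means asynchronous crypto has
 * completed and processing resumes on the state already attached to the
 * skb, and values < -1 indicate entry from the GRO code paths with the
 * state likewise already resolved.
 */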
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
        const struct xfrm_state_afinfo *afinfo;
        struct net *net = dev_net(skb->dev);
        const struct xfrm_mode *inner_mode;
        int err;
        __be32 seq;
        __be32 seq_hi;
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        u32 mark = skb->mark;
        unsigned int family = AF_UNSPEC;
        int decaps = 0;
        int async = 0;
        bool xfrm_gro = false;
        bool crypto_done = false;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (encap_type < 0) {
                x = xfrm_input_state(skb);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net,
                                               LINUX_MIB_XFRMINSTATEINVALID);

                        if (encap_type == -1)
                                dev_put(skb->dev);
                        goto drop;
                }

                family = x->outer_mode.family;

                /* An encap_type of -1 indicates async resumption. */
                if (encap_type == -1) {
                        async = 1;
                        seq = XFRM_SKB_CB(skb)->seq.input.low;
                        goto resume;
                }

                /* encap_type < -1 indicates a GRO call. */
                encap_type = 0;
                seq = XFRM_SPI_SKB_CB(skb)->seq;

                if (xo && (xo->flags & CRYPTO_DONE)) {
                        crypto_done = true;
                        family = XFRM_SPI_SKB_CB(skb)->family;

                        if (!(xo->status & CRYPTO_SUCCESS)) {
                                if (xo->status &
                                    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
                                     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
                                     CRYPTO_TUNNEL_AH_AUTH_FAILED |
                                     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

                                        xfrm_audit_state_icvfail(x, skb,
                                                                 x->type->proto);
                                        x->stats.integrity_failed++;
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                if (xo->status & CRYPTO_INVALID_PROTOCOL) {
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                                goto drop;
                        }

                        if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                                goto drop;
                        }
                }

                goto lock;
        }

        family = XFRM_SPI_SKB_CB(skb)->family;

        /* if tunnel is present override skb->mark value with tunnel i_key */
        switch (family) {
        case AF_INET:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
                break;
        case AF_INET6:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
                break;
        }

        sp = secpath_set(skb);
        if (!sp) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                goto drop;
        }

        seq = 0;
        if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
                secpath_reset(skb);
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                goto drop;
        }

        daddr = (xfrm_address_t *)(skb_network_header(skb) +
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        do {
                sp = skb_sec_path(skb);

                if (sp->len == XFRM_MAX_DEPTH) {
                        secpath_reset(skb);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                        goto drop;
                }

                x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        secpath_reset(skb);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
                        goto drop;
                }

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;

                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                        goto drop;
                }

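                /* Both entry paths meet here: the lookup loop above arrives
                 * with a freshly resolved state, and the GRO/offload path
                 * jumps straight in via "goto lock" with the state already
                 * attached to the skb.
                 */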
lock:
                spin_lock(&x->lock);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net,
                                               LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop_unlock;
                }

                if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
                        goto drop_unlock;
                }

                if (x->repl->check(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                if (xfrm_state_check_expire(x)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
                        goto drop_unlock;
                }

                spin_unlock(&x->lock);

                if (xfrm_tunnel_check(skb, x, family)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                seq_hi = htonl(xfrm_replay_seqhi(x, seq));

                XFRM_SKB_CB(skb)->seq.input.low = seq;
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

                dev_hold(skb->dev);

                if (crypto_done)
                        nexthdr = x->type_offload->input_tail(x, skb);
                else
                        nexthdr = x->type->input(x, skb);

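                /* -EINPROGRESS means the crypto layer queued the request;
                 * processing continues later via xfrm_input_resume(), which
                 * re-enters this function at the "resume" label below.
                 */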
                if (nexthdr == -EINPROGRESS)
                        return 0;
resume:
                dev_put(skb->dev);

                spin_lock(&x->lock);
                if (nexthdr <= 0) {
                        if (nexthdr == -EBADMSG) {
                                xfrm_audit_state_icvfail(x, skb,
                                                         x->type->proto);
                                x->stats.integrity_failed++;
                        }
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                        goto drop_unlock;
                }

                /* only the first xfrm gets the encap type */
                encap_type = 0;

                if (async && x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                x->repl->advance(x, seq);

                x->curlft.bytes += skb->len;
                x->curlft.packets++;

                spin_unlock(&x->lock);

                XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

                inner_mode = &x->inner_mode;

                if (x->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                        if (inner_mode == NULL) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                                goto drop;
                        }
                }

                if (xfrm_inner_mode_input(x, inner_mode, skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
                        decaps = 1;
                        break;
                }

                /*
                 * We need the inner address. However, we only get here for
                 * transport mode so the outer address is identical.
                 */
                daddr = &x->id.daddr;
                family = x->outer_mode.family;

                err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
                if (err < 0) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
                crypto_done = false;
        } while (!err);

        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
        if (err)
                goto drop;

        nf_reset_ct(skb);

        if (decaps) {
                sp = skb_sec_path(skb);
                if (sp)
                        sp->olen = 0;
                skb_dst_drop(skb);
                gro_cells_receive(&gro_cells, skb);
                return 0;
        } else {
                xo = xfrm_offload(skb);
                if (xo)
                        xfrm_gro = xo->flags & XFRM_GRO;

                err = -EAFNOSUPPORT;
                rcu_read_lock();
                afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
                if (likely(afinfo))
                        err = afinfo->transport_finish(skb, xfrm_gro || async);
                rcu_read_unlock();
                if (xfrm_gro) {
                        sp = skb_sec_path(skb);
                        if (sp)
                                sp->olen = 0;
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
                        return err;
                }

                return err;
        }

drop_unlock:
        spin_unlock(&x->lock);
drop:
        xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(xfrm_input);

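/*
 * Re-enter xfrm_input() after asynchronous crypto has finished.  This is
 * typically called from the transform's async completion callback (for
 * example the ESP done handlers); the -1 encap_type routes execution to
 * the "resume" label above.
 */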
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
        return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

static void xfrm_trans_reinject(unsigned long data)
{
        struct xfrm_trans_tasklet *trans = (void *)data;
        struct sk_buff_head queue;
        struct sk_buff *skb;

        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&trans->queue, &queue);

        while ((skb = __skb_dequeue(&queue)))
                XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
}

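/*
 * Defer the final delivery step to the per-CPU tasklet above instead of
 * running it directly in the current context.  The queue is bounded by
 * netdev_max_backlog, and the stashed "finish" callback is what the tasklet
 * invokes for each queued skb.  A caller sketch (assumed, not taken from
 * this file) would look like
 *
 *      if (xfrm_trans_queue(skb, example_finish) < 0)
 *              kfree_skb(skb);
 *
 * where example_finish has the (struct net *, struct sock *,
 * struct sk_buff *) signature expected by XFRM_TRANS_SKB_CB()->finish.
 */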
int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *))
{
        struct xfrm_trans_tasklet *trans;

        trans = this_cpu_ptr(&xfrm_trans_tasklet);

        if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
                return -ENOBUFS;

        XFRM_TRANS_SKB_CB(skb)->finish = finish;
        __skb_queue_tail(&trans->queue, skb);
        tasklet_schedule(&trans->tasklet);
        return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);

void __init xfrm_input_init(void)
{
        int err;
        int i;

        init_dummy_netdev(&xfrm_napi_dev);
        err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
        if (err)
                gro_cells.cells = NULL;

        for_each_possible_cpu(i) {
                struct xfrm_trans_tasklet *trans;

                trans = &per_cpu(xfrm_trans_tasklet, i);
                __skb_queue_head_init(&trans->queue);
                tasklet_init(&trans->tasklet, xfrm_trans_reinject,
                             (unsigned long)trans);
        }
}