// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static struct kmem_cache *secpath_cachep __read_mostly;

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

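/* Register the address-family specific input callback used by
 * xfrm_rcv_cb().  Only one afinfo may be registered per family;
 * a duplicate registration fails with -EEXIST.
 */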
int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

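/* Unregister a previously registered afinfo and wait for in-flight RCU
 * readers to finish before returning to the caller.
 */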
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

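/* Look up the afinfo for @family under rcu_read_lock().  On success the
 * RCU read lock is left held and must be dropped by the caller (see
 * xfrm_rcv_cb()); on failure it is released here and NULL is returned.
 */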
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

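/* Run the address-family specific receive callback for @skb and release
 * the RCU read lock taken by xfrm_input_get_afinfo().
 */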
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

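/* Final teardown of a sec_path: drop the reference held on each
 * xfrm_state in the vector and return the structure to the slab cache.
 */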
void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

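/* Allocate a fresh sec_path.  If @src is given its contents are copied
 * and a reference is taken on every xfrm_state in the copied xvec.  The
 * new sec_path starts with a refcount of one; NULL is returned if the
 * allocation fails.
 */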
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	sp->olen = 0;

	memset(sp->ovec, 0, sizeof(sp->ovec));

	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	refcount_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

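/* Make sure skb->sp can be modified by the caller: allocate a secpath if
 * the skb has none, or copy-on-write a shared one so this skb holds the
 * only reference.  Returns 0 on success or -ENOMEM.
 */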
int secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
		sp = secpath_dup(skb->sp);
		if (!sp)
			return -ENOMEM;

		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	return 0;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header.  Returns 0 on success with *spi
 * and *seq filled in, 1 if nexthdr is not an IPsec protocol, or -EINVAL
 * if the header cannot be pulled from the skb.
 */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

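/* Prepare a packet for inner-mode processing: let the outer mode's
 * address family extract the inner header, select the inner mode (by
 * inner protocol when the selector family is AF_UNSPEC) and hand the
 * skb to that mode's input2() handler.
 */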
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

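/* Main IPsec receive path.  Entered with a freshly parsed packet
 * (encap_type >= 0), on asynchronous crypto resumption (encap_type == -1)
 * or from the GRO code path (encap_type < -1).  For each transform the
 * state is looked up by (mark, daddr, spi, proto, family), replay window
 * and expiry are checked under x->lock, the transform's input handler
 * runs and the inner mode decapsulates the payload.  The loop repeats
 * for nested transport-mode transforms until a tunnel is decapsulated
 * or no further IPsec header follows.
 */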
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode->afinfo->family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			x = xfrm_input_state(skb);
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	err = secpath_set(skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		skb->sp->xvec[skb->sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset(skb);

	if (decaps) {
		if (skb->sp)
			skb->sp->olen = 0;
		skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
		if (xfrm_gro) {
			if (skb->sp)
				skb->sp->olen = 0;
			skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

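/* Re-enter xfrm_input() after asynchronous crypto processing has
 * completed for @skb.
 */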
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

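/* Tasklet callback: splice the per-cpu queue and run the stored finish
 * handler on each deferred skb.
 */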
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
}

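/* Queue an skb for deferred processing on the per-cpu reinject tasklet;
 * @finish is invoked for it from xfrm_trans_reinject().  Returns -ENOBUFS
 * once the queue already holds netdev_max_backlog packets.
 */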
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);

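/* Boot-time initialisation: register the dummy napi device for the GRO
 * cells, create the sec_path slab cache and set up the per-cpu reinject
 * tasklets.
 */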
void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}