// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <crypto/aead.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
#define xfrm_state_deref_check(table, net) \
	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
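
/*
 * Editor's note (illustrative, not part of the original file): both helpers
 * dereference the RCU-managed state hash tables.  xfrm_state_deref_prot() is
 * for writers that hold net->xfrm.xfrm_state_lock, while
 * xfrm_state_deref_check() additionally tolerates plain RCU readers.  A
 * minimal writer-side sketch, assuming the lock is held:
 *
 *	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 *	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
 *	... walk or replace the table here ...
 *	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 */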

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static HLIST_HEAD(xfrm_state_dev_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
	return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}

#define XFRM_STATE_INSERT(by, _n, _h, _type)				\
	{								\
		struct xfrm_state *_x = NULL;				\
									\
		if (_type != XFRM_DEV_OFFLOAD_PACKET) {			\
			hlist_for_each_entry_rcu(_x, _h, by) {		\
				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
					continue;			\
				break;					\
			}						\
		}							\
									\
		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)	\
			/* SAD is empty or consists of HW SAs only */	\
			hlist_add_head_rcu(_n, _h);			\
		else							\
			hlist_add_before_rcu(_n, &_x->by);		\
	}
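
/*
 * Editor's note: the intended invariant of XFRM_STATE_INSERT() is that
 * packet-offload (hardware) SAs sit at the head of every hash chain, so HW
 * lookups can stop at the first software entry they meet.  A hardware SA
 * always goes to the head; a software SA is placed just before the first
 * software entry found past the HW prefix.  Resulting chain layout
 * (illustrative):
 *
 *	head -> [HW SA] -> [HW SA] -> [SW SA] -> [SW SA] -> NULL
 */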

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       struct hlist_head *nseqtable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
					  x->xso.type);
		}

		if (x->km.seq) {
			h = __xfrm_seq_hash(x->km.seq, nhashmask);
			XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
					  x->xso.type);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}
	nseq = xfrm_hash_alloc(nsize);
	if (!nseq) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		xfrm_hash_free(nspi, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);

	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
	oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
	ohashmask = net->xfrm.state_hmask;

	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
	net->xfrm.state_hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);

	synchronize_rcu();

	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
	xfrm_hash_free(oseq, osize);
}
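
/*
 * Editor's note: the resize above follows the usual RCU replace pattern:
 * allocate new tables, rehash under xfrm_state_lock while bumping the
 * xfrm_state_hash_generation seqcount so lockless readers can detect the
 * swap and retry (see xfrm_hash_ptrs_get() below), publish the new tables
 * with rcu_assign_pointer(), and free the old ones only after
 * synchronize_rcu() guarantees no reader still holds a pointer into them.
 */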

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);
static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (!afinfo)
		return -EAFNOSUPPORT;

#define X(afi, T, name) do {			\
		WARN_ON((afi)->type_ ## name);	\
		(afi)->type_ ## name = (T);	\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}
#undef X
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

#define X(afi, T, name) do {				\
		WARN_ON((afi)->type_ ## name != (T));	\
		(afi)->type_ ## name = NULL;		\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		break;
	}
#undef X
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	const struct xfrm_type *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_COMP:
		type = afinfo->type_comp;
		break;
	case IPPROTO_AH:
		type = afinfo->type_ah;
		break;
	case IPPROTO_ESP:
		type = afinfo->type_esp;
		break;
	case IPPROTO_IPIP:
		type = afinfo->type_ipip;
		break;
	case IPPROTO_DSTOPTS:
		type = afinfo->type_dstopts;
		break;
	case IPPROTO_ROUTING:
		type = afinfo->type_routing;
		break;
	case IPPROTO_IPV6:
		type = afinfo->type_ipip6;
		break;
	default:
		break;
	}

	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && !modload_attempted) {
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	return type;
}
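
/*
 * Editor's note: the "xfrm-type-%d-%d" request_module() above autoloads
 * protocol handlers by module alias.  A provider would typically declare
 * (a sketch; MODULE_ALIAS_XFRM_TYPE is the helper used for this elsewhere
 * in the tree):
 *
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
 *
 * which expands to MODULE_ALIAS("xfrm-type-2-50"), so the first lookup of
 * ESP over IPv4 pulls the module in.
 */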

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_type_offload(const struct xfrm_type_offload *type,
			       unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp);
		afinfo->type_offload_esp = type;
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}

	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp != type);
		afinfo->type_offload_esp = NULL;
		break;
	default:
		WARN_ON(1);
		break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

static const struct xfrm_type_offload *
xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
{
	const struct xfrm_type_offload *type = NULL;
	struct xfrm_state_afinfo *afinfo;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_ESP:
		type = afinfo->type_offload_esp;
		break;
	default:
		break;
	}

	if ((type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && try_load) {
		request_module("xfrm-offload-%d-%d", family, proto);
		try_load = false;
		goto retry;
	}

	return type;
}

static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
	module_put(type->owner);
}

static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
};

static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_ROUTEOPTIMIZATION] = {
		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
		.family = AF_INET6,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET6,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
};

static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

	switch (family) {
	case AF_INET:
		mode = &xfrm4_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	case AF_INET6:
		mode = &xfrm6_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	default:
		break;
	}

	return NULL;
}

void xfrm_state_free(struct xfrm_state *x)
{
	kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

static void ___xfrm_state_destroy(struct xfrm_state *x)
{
	hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->type_offload)
		xfrm_put_type_offload(x->type_offload);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	if (x->xfrag.page)
		put_page(x->xfrag.page);
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}

static void xfrm_state_gc_task(struct work_struct *work)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	synchronize_rcu();

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		___xfrm_state_destroy(x);
}
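
/*
 * Editor's note: destruction is two-phase.  __xfrm_state_destroy() queues
 * the state on xfrm_state_gc_list and schedules this worker; the worker
 * detaches the whole batch with hlist_move_list(), waits for one RCU grace
 * period, and only then runs the real destructors.  A single
 * synchronize_rcu() is thus amortized over every state queued since the
 * last run.
 */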

static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	xfrm_dev_state_update_stats(x);

	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		time64_t tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time by below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		time64_t tmo = x->lft.hard_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		time64_t tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		time64_t tmo = x->lft.soft_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return ret;
}
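
/*
 * Editor's note, with a worked example (hypothetical numbers): assume
 * lft.hard_add_expires_seconds = 3600 and curlft.add_time = now - 3000.
 * Then tmo = 3600 + (now - 3000) - now = 600, so the handler re-arms the
 * hrtimer to fire in 600 seconds.  Once a hard tmo reaches <= 0 the state
 * is deleted and km_state_expired(x, 1, 0) reports a hard expiry; a soft
 * tmo of <= 0 merely sets km.dying and reports a soft expiry.
 */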

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		refcount_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->state_cache);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		INIT_HLIST_NODE(&x->byseq);
		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
		x->mtimer.function = xfrm_timer_handler;
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
		x->curlft.add_time = ktime_get_real_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->pcpu_num = UINT_MAX;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
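
/*
 * Editor's note -- a minimal, illustrative usage sketch (field values are
 * hypothetical, error handling elided):
 *
 *	struct xfrm_state *x = xfrm_state_alloc(net);
 *
 *	if (!x)
 *		return -ENOMEM;
 *	x->id.proto = IPPROTO_ESP;
 *	x->props.family = AF_INET;
 *	...
 *	err = xfrm_state_add(x);	// hashes x into the per-net tables
 */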

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_state_delete(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev) {
		dev->xfrmdev_ops->xdo_dev_state_delete(x);
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);
	}
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);

void xfrm_dev_state_free(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops) {
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		if (!hlist_unhashed(&x->dev_gclist))
			hlist_del(&x->dev_gclist);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);

		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(x);
		WRITE_ONCE(xso->dev, NULL);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		netdev_put(dev, &xso->dev_tracker);
	}
}
#endif

void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	if (sync) {
		synchronize_rcu();
		___xfrm_state_destroy(x);
	} else {
		spin_lock_bh(&xfrm_state_gc_lock);
		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
		spin_unlock_bh(&xfrm_state_gc_lock);
		schedule_work(&xfrm_state_gc_work);
	}
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;

		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del_rcu(&x->bydst);
		hlist_del_rcu(&x->bysrc);
		if (x->km.seq)
			hlist_del_rcu(&x->byseq);
		if (!hlist_unhashed(&x->state_cache))
			hlist_del_rcu(&x->state_cache);
		if (!hlist_unhashed(&x->state_cache_input))
			hlist_del_rcu(&x->state_cache_input);

		if (x->id.spi)
			hlist_del_rcu(&x->byspi);
		net->xfrm.state_num--;
		xfrm_nat_keepalive_state_updated(x);
		spin_unlock(&net->xfrm.xfrm_state_lock);

		if (x->encap_sk)
			sock_put(rcu_dereference_raw(x->encap_sk));

		xfrm_dev_state_delete(x);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_dev_offload *xso;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (xso->dev == dev &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				if (sync)
					xfrm_state_put_sync(x);
				else
					xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (cnt)
		err = 0;

	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
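
/*
 * Editor's note: the "goto restart" pattern above exists because
 * xfrm_state_delete() must be called without xfrm_state_lock held (it
 * re-acquires that lock internally).  Dropping the lock invalidates the
 * hlist iteration, so after every deletion the walk restarts from the head
 * of the current bucket, which the just-deleted entry has already left.
 */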

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct xfrm_dev_offload *xso;
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (!xfrm_state_kern(x) && xso->dev == dev) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_dev_state_free(x);

				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	spin_lock_bh(&xfrm_state_dev_gc_lock);
restart_gc:
	hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
		xso = &x->xso;

		if (xso->dev == dev) {
			spin_unlock_bh(&xfrm_state_dev_gc_lock);
			xfrm_dev_state_free(x);
			spin_lock_bh(&xfrm_state_dev_gc_lock);
			goto restart_gc;
		}
	}
	spin_unlock_bh(&xfrm_state_dev_gc_lock);

	xfrm_flush_gc();

	return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	sel->daddr.a4 = fl4->daddr;
	sel->saddr.a4 = fl4->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET;
	sel->prefixlen_d = 32;
	sel->prefixlen_s = 32;
	sel->proto = fl4->flowi4_proto;
	sel->ifindex = fl4->flowi4_oif;
}

static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	/* Initialize temporary selector matching only to current session. */
	*(struct in6_addr *)&sel->daddr = fl6->daddr;
	*(struct in6_addr *)&sel->saddr = fl6->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET6;
	sel->prefixlen_d = 128;
	sel->prefixlen_s = 128;
	sel->proto = fl6->flowi6_proto;
	sel->ifindex = fl6->flowi6_oif;
}

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		__xfrm4_init_tempsel(&x->sel, fl);
		break;
	case AF_INET6:
		__xfrm6_init_tempsel(&x->sel, fl);
		break;
	}

	x->id = tmpl->id;

	switch (tmpl->encap_family) {
	case AF_INET:
		if (x->id.daddr.a4 == 0)
			x->id.daddr.a4 = daddr->a4;
		x->props.saddr = tmpl->saddr;
		if (x->props.saddr.a4 == 0)
			x->props.saddr.a4 = saddr->a4;
		break;
	case AF_INET6:
		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
		break;
	}

	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = tmpl->encap_family;
}

struct xfrm_hash_state_ptrs {
	const struct hlist_head *bydst;
	const struct hlist_head *bysrc;
	const struct hlist_head *byspi;
	unsigned int hmask;
};

static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
{
	unsigned int sequence;

	do {
		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
		ptrs->hmask = net->xfrm.state_hmask;
	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
}
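
/*
 * Editor's note: this is the reader side of the seqcount protocol started
 * in xfrm_hash_resize().  The loop snapshots the three table pointers plus
 * the mask and retries if a resize ran concurrently, so callers always see
 * a mutually consistent (table, hmask) pair without taking
 * xfrm_state_lock.
 */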

static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
						  u32 mark,
						  const xfrm_address_t *daddr,
						  __be32 spi, u8 proto,
						  unsigned short family,
						  struct xfrm_dev_offload *xdo)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list;
				 * there is no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
			if (xdo->dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
					      u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct hlist_head *state_cache_input;
	struct xfrm_state *x = NULL;

	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		goto out;
	}

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);

	if (x && x->km.state == XFRM_STATE_VALID) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache_input)) {
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		} else {
			hlist_del_rcu(&x->state_cache_input);
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		}
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

out:
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_input_state_lookup);
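
/*
 * Editor's note: the fast path above walks a small per-CPU cache of
 * recently used inbound states before falling back to the byspi hash.  On
 * a fallback hit the state is (re-)inserted at the head of this CPU's
 * cache list, giving a simple most-recently-used ordering.
 */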

static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
						     u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	xfrm_hash_ptrs_get(net, &state_ptrs);

	if (use_spi)
		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error, unsigned int pcpu_id)
{
	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     (x->sel.family != family ||
		      !xfrm_selector_match(&x->sel, fl, family))) ||
		    !security_xfrm_state_pol_flow_match(x, pol,
							&fl->u.__fl_common))
			return;

		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
			return;

		if (!*best ||
		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		if (!*best || x->pcpu_num == pcpu_id)
			*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if ((!x->sel.family ||
		     (x->sel.family == family &&
		      xfrm_selector_match(&x->sel, fl, family))) &&
		    security_xfrm_state_pol_flow_match(x, pol,
						       &fl->u.__fl_common))
			*error = -ESRCH;
	}
}
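
/*
 * Editor's note on the "best" ordering above: among VALID candidates, a
 * state bound to this CPU (pcpu_num == pcpu_id) beats a floating one, a
 * non-dying state beats a dying one, and on a tie the more recently added
 * state (larger curlft.add_time) wins.  An ACQ candidate only sets
 * acq_in_progress; ERROR/EXPIRED candidates with a matching selector turn
 * into -ESRCH.
 */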

struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family, u32 if_id)
{
	static xfrm_address_t saddr_wildcard = { };
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	unsigned int sequence;
	struct km_event c;
	unsigned int pcpu_id;
	bool cached = false;

	/* We need the CPU id just as a lookup key;
	 * we don't require it to be stable.
	 */
	pcpu_id = raw_smp_processor_id();

	to_put = NULL;

	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	rcu_read_lock();
	xfrm_hash_ptrs_get(net, &state_ptrs);

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

	if (best)
		goto cached;

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

cached:
	cached = true;
	if (best)
		goto found;
	else if (error)
		best = NULL;
	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
		WARN_ON(1);

	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list;
				 * there is no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
				     encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list;
				 * there is no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

found:
	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
	    (best && (best->pcpu_num == pcpu_id)))
		x = best;

	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
						  tmpl->id.spi, tmpl->id.proto,
						  encap_family,
						  &pol->xdo)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
		x->if_id = if_id;
		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
			x->pcpu_num = pcpu_id;

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			struct xfrm_dev_offload *xdo = &pol->xdo;
			struct xfrm_dev_offload *xso = &x->xso;

			xso->type = XFRM_DEV_OFFLOAD_PACKET;
			xso->dir = xdo->dir;
			xso->dev = xdo->dev;
			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
			netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
			error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
			if (error) {
				xso->dir = 0;
				netdev_put(xso->dev, &xso->dev_tracker);
				xso->dev = NULL;
				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
				x->km.state = XFRM_STATE_DEAD;
				to_put = x;
				x = NULL;
				goto out;
			}
		}
#endif
		if (km_query(x, tmpl, pol) == 0) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);
			x->km.state = XFRM_STATE_ACQ;
			x->dir = XFRM_SA_DIR_OUT;
			list_add(&x->km.all, &net->xfrm.state_all);
			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
			XFRM_STATE_INSERT(bydst, &x->bydst,
					  net->xfrm.state_bydst + h,
					  x->xso.type);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			XFRM_STATE_INSERT(bysrc, &x->bysrc,
					  net->xfrm.state_bysrc + h,
					  x->xso.type);
			INIT_HLIST_NODE(&x->state_cache);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				XFRM_STATE_INSERT(byspi, &x->byspi,
						  net->xfrm.state_byspi + h,
						  x->xso.type);
			}
			if (x->km.seq) {
				h = xfrm_seq_hash(net, x->km.seq);
				XFRM_STATE_INSERT(byseq, &x->byseq,
						  net->xfrm.state_byseq + h,
						  x->xso.type);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		} else {
#ifdef CONFIG_XFRM_OFFLOAD
			struct xfrm_dev_offload *xso = &x->xso;

			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
				xfrm_dev_state_delete(x);
				xfrm_dev_state_free(x);
			}
#endif
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}

		/* Use the already installed 'fallback' while the CPU-specific
		 * SA acquire is handled. */
		if (best)
			x = best;
	}
out:
	if (x) {
		if (!xfrm_state_hold_rcu(x)) {
			*err = -EAGAIN;
			x = NULL;
		}
	} else {
		*err = acquire_in_progress ? -EAGAIN : error;
	}

	if (x && x->km.state == XFRM_STATE_VALID && !cached &&
	    (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache))
			hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

	rcu_read_unlock();
	if (to_put)
		xfrm_state_put(to_put);

	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
		*err = -EAGAIN;
		if (x) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	return x;
}
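
/*
 * Editor's note: resolution order in xfrm_state_find() is (1) the policy's
 * private state cache, (2) the bydst hash with the real source address,
 * (3) the bydst hash with a wildcard source, and finally (4) allocation of
 * a temporary ACQ state plus a km_query() upcall so a key manager can
 * negotiate a real SA.  Callers see -EAGAIN whenever a hash resize raced
 * with the lookup.
 */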
1586
1587 struct xfrm_state *
xfrm_stateonly_find(struct net * net,u32 mark,u32 if_id,xfrm_address_t * daddr,xfrm_address_t * saddr,unsigned short family,u8 mode,u8 proto,u32 reqid)1588 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1589 xfrm_address_t *daddr, xfrm_address_t *saddr,
1590 unsigned short family, u8 mode, u8 proto, u32 reqid)
1591 {
1592 unsigned int h;
1593 struct xfrm_state *rx = NULL, *x = NULL;
1594
1595 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1596 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1597 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1598 if (x->props.family == family &&
1599 x->props.reqid == reqid &&
1600 (mark & x->mark.m) == x->mark.v &&
1601 x->if_id == if_id &&
1602 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1603 xfrm_state_addr_check(x, daddr, saddr, family) &&
1604 mode == x->props.mode &&
1605 proto == x->id.proto &&
1606 x->km.state == XFRM_STATE_VALID) {
1607 rx = x;
1608 break;
1609 }
1610 }
1611
1612 if (rx)
1613 xfrm_state_hold(rx);
1614 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1615
1616
1617 return rx;
1618 }
1619 EXPORT_SYMBOL(xfrm_stateonly_find);
1620
xfrm_state_lookup_byspi(struct net * net,__be32 spi,unsigned short family)1621 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1622 unsigned short family)
1623 {
1624 struct xfrm_state *x;
1625 struct xfrm_state_walk *w;
1626
1627 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1628 list_for_each_entry(w, &net->xfrm.state_all, all) {
1629 x = container_of(w, struct xfrm_state, km);
1630 if (x->props.family != family ||
1631 x->id.spi != spi)
1632 continue;
1633
1634 xfrm_state_hold(x);
1635 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1636 return x;
1637 }
1638 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1639 return NULL;
1640 }
1641 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1642
__xfrm_state_insert(struct xfrm_state * x)1643 static void __xfrm_state_insert(struct xfrm_state *x)
1644 {
1645 struct net *net = xs_net(x);
1646 unsigned int h;
1647
1648 list_add(&x->km.all, &net->xfrm.state_all);
1649
1650 /* Sanitize mark before store */
1651 x->mark.v &= x->mark.m;
1652
1653 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1654 x->props.reqid, x->props.family);
1655 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1656 x->xso.type);
1657
1658 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1659 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1660 x->xso.type);
1661
1662 if (x->id.spi) {
1663 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1664 x->props.family);
1665
1666 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
1667 x->xso.type);
1668 }
1669
1670 if (x->km.seq) {
1671 h = xfrm_seq_hash(net, x->km.seq);
1672
1673 XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
1674 x->xso.type);
1675 }
1676
1677 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1678 if (x->replay_maxage)
1679 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1680
1681 net->xfrm.state_num++;
1682
1683 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1684 xfrm_nat_keepalive_state_updated(x);
1685 }
1686
1687 /* net->xfrm.xfrm_state_lock is held */
__xfrm_state_bump_genids(struct xfrm_state * xnew)1688 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1689 {
1690 struct net *net = xs_net(xnew);
1691 unsigned short family = xnew->props.family;
1692 u32 reqid = xnew->props.reqid;
1693 struct xfrm_state *x;
1694 unsigned int h;
1695 u32 mark = xnew->mark.v & xnew->mark.m;
1696 u32 if_id = xnew->if_id;
1697 u32 cpu_id = xnew->pcpu_num;
1698
1699 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1700 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1701 if (x->props.family == family &&
1702 x->props.reqid == reqid &&
1703 x->if_id == if_id &&
1704 x->pcpu_num == cpu_id &&
1705 (mark & x->mark.m) == x->mark.v &&
1706 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1707 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1708 x->genid++;
1709 }
1710 }
1711
xfrm_state_insert(struct xfrm_state * x)1712 void xfrm_state_insert(struct xfrm_state *x)
1713 {
1714 struct net *net = xs_net(x);
1715
1716 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1717 __xfrm_state_bump_genids(x);
1718 __xfrm_state_insert(x);
1719 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1720 }
1721 EXPORT_SYMBOL(xfrm_state_insert);
1722
1723 /* net->xfrm.xfrm_state_lock is held */
__find_acq_core(struct net * net,const struct xfrm_mark * m,unsigned short family,u8 mode,u32 reqid,u32 if_id,u32 pcpu_num,u8 proto,const xfrm_address_t * daddr,const xfrm_address_t * saddr,int create)1724 static struct xfrm_state *__find_acq_core(struct net *net,
1725 const struct xfrm_mark *m,
1726 unsigned short family, u8 mode,
1727 u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1728 const xfrm_address_t *daddr,
1729 const xfrm_address_t *saddr,
1730 int create)
1731 {
1732 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1733 struct xfrm_state *x;
1734 u32 mark = m->v & m->m;
1735
1736 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1737 if (x->props.reqid != reqid ||
1738 x->props.mode != mode ||
1739 x->props.family != family ||
1740 x->km.state != XFRM_STATE_ACQ ||
1741 x->id.spi != 0 ||
1742 x->id.proto != proto ||
1743 (mark & x->mark.m) != x->mark.v ||
1744 x->pcpu_num != pcpu_num ||
1745 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1746 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1747 continue;
1748
1749 xfrm_state_hold(x);
1750 return x;
1751 }
1752
1753 if (!create)
1754 return NULL;
1755
1756 x = xfrm_state_alloc(net);
1757 if (likely(x)) {
1758 switch (family) {
1759 case AF_INET:
1760 x->sel.daddr.a4 = daddr->a4;
1761 x->sel.saddr.a4 = saddr->a4;
1762 x->sel.prefixlen_d = 32;
1763 x->sel.prefixlen_s = 32;
1764 x->props.saddr.a4 = saddr->a4;
1765 x->id.daddr.a4 = daddr->a4;
1766 break;
1767
1768 case AF_INET6:
1769 x->sel.daddr.in6 = daddr->in6;
1770 x->sel.saddr.in6 = saddr->in6;
1771 x->sel.prefixlen_d = 128;
1772 x->sel.prefixlen_s = 128;
1773 x->props.saddr.in6 = saddr->in6;
1774 x->id.daddr.in6 = daddr->in6;
1775 break;
1776 }
1777
1778 x->pcpu_num = pcpu_num;
1779 x->km.state = XFRM_STATE_ACQ;
1780 x->id.proto = proto;
1781 x->props.family = family;
1782 x->props.mode = mode;
1783 x->props.reqid = reqid;
1784 x->if_id = if_id;
1785 x->mark.v = m->v;
1786 x->mark.m = m->m;
1787 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1788 xfrm_state_hold(x);
1789 hrtimer_start(&x->mtimer,
1790 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1791 HRTIMER_MODE_REL_SOFT);
1792 list_add(&x->km.all, &net->xfrm.state_all);
1793 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1794 x->xso.type);
1795 h = xfrm_src_hash(net, daddr, saddr, family);
1796 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1797 x->xso.type);
1798
1799 net->xfrm.state_num++;
1800
1801 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1802 }
1803
1804 return x;
1805 }
1806
1807 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1808
xfrm_state_add(struct xfrm_state * x)1809 int xfrm_state_add(struct xfrm_state *x)
1810 {
1811 struct net *net = xs_net(x);
1812 struct xfrm_state *x1, *to_put;
1813 int family;
1814 int err;
1815 u32 mark = x->mark.v & x->mark.m;
1816 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1817
1818 family = x->props.family;
1819
1820 to_put = NULL;
1821
1822 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1823
1824 x1 = __xfrm_state_locate(x, use_spi, family);
1825 if (x1) {
1826 to_put = x1;
1827 x1 = NULL;
1828 err = -EEXIST;
1829 goto out;
1830 }
1831
1832 if (use_spi && x->km.seq) {
1833 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
1834 if (x1 && ((x1->id.proto != x->id.proto) ||
1835 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1836 to_put = x1;
1837 x1 = NULL;
1838 }
1839 }
1840
1841 if (use_spi && !x1)
1842 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1843 x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
1844 &x->id.daddr, &x->props.saddr, 0);
1845
1846 __xfrm_state_bump_genids(x);
1847 __xfrm_state_insert(x);
1848 err = 0;
1849
1850 out:
1851 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1852
1853 if (x1) {
1854 xfrm_state_delete(x1);
1855 xfrm_state_put(x1);
1856 }
1857
1858 if (to_put)
1859 xfrm_state_put(to_put);
1860
1861 return err;
1862 }
1863 EXPORT_SYMBOL(xfrm_state_add);

#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
	struct xfrm_user_sec_ctx *uctx;
	int size = sizeof(*uctx) + security->ctx_len;
	int err;

	uctx = kmalloc(size, GFP_KERNEL);
	if (!uctx)
		return -ENOMEM;

	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = size;
	uctx->ctx_doi = security->ctx_doi;
	uctx->ctx_alg = security->ctx_alg;
	uctx->ctx_len = security->ctx_len;
	memcpy(uctx + 1, security->ctx_str, security->ctx_len);
	err = security_xfrm_state_alloc(x, uctx);
	kfree(uctx);
	if (err)
		return err;

	return 0;
}

static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
					   struct xfrm_encap_tmpl *encap)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);
	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		x->geniv = orig->geniv;
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (encap || orig->encap) {
		if (encap)
			x->encap = kmemdup(encap, sizeof(*x->encap),
					   GFP_KERNEL);
		else
			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
					   GFP_KERNEL);

		if (!x->encap)
			goto error;
	}

	if (orig->security)
		if (clone_security(x, orig->security))
			goto error;

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->pcpu_num = orig->pcpu_num;
	x->if_id = orig->if_id;
	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;
	x->replay = orig->replay;
	x->preplay = orig->preplay;
	x->mapping_maxage = orig->mapping_maxage;
	x->lastused = orig->lastused;
	x->new_mapping = 0;
	x->new_mapping_sport = 0;
	x->dir = orig->dir;

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}

struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
					   u32 if_id)
{
	unsigned int h;
	struct xfrm_state *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	if (m->reqid) {
		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (if_id != 0 && x->if_id != if_id)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	} else {
		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (if_id != 0 && x->if_id != if_id)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	}

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m,
				      struct xfrm_encap_tmpl *encap,
				      struct net *net,
				      struct xfrm_user_offload *xuo,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_state *xc;

	xc = xfrm_state_clone(x, encap);
	if (!xc)
		return NULL;

	xc->props.family = m->new_family;

	if (xfrm_init_state(xc) < 0)
		goto error;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* configure the hardware if offload is requested */
	if (xuo && xfrm_dev_state_add(net, xc, xuo, extack))
		goto error;

	/* Add the state.  Care is needed when the destination address of
	 * the state changes, since it is part of the lookup triplet: if
	 * it is unchanged we can insert the clone directly, otherwise go
	 * through xfrm_state_add() so conflicts are detected.
	 */
	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		xfrm_state_insert(xc);
	} else {
		if (xfrm_state_add(xc) < 0)
			goto error;
	}

	return xc;
error:
	xfrm_state_put(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
	struct net *net = xs_net(x);

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

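	/* If the existing entry is still a larval ACQ, this update
	 * really resolves the acquire: insert x in its place and let
	 * the ACQ be deleted below.  Otherwise x1 is a live SA and
	 * only selected fields of it are refreshed from x further down.
	 */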
	if (x1->km.state == XFRM_STATE_ACQ) {
		if (x->dir && x1->dir != x->dir)
			goto out;

		__xfrm_state_insert(x);
		x = NULL;
	} else {
		if (x1->dir != x->dir)
			goto out;
	}
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap &&
		    x->encap->encap_type == x1->encap->encap_type)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		else if (x->encap || x1->encap)
			goto fail;

		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
			      HRTIMER_MODE_REL_SOFT);
		if (READ_ONCE(x1->curlft.use_time))
			xfrm_state_check_expire(x1);

		if (x->props.smark.m || x->props.smark.v || x->if_id) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);

			if (x->props.smark.m || x->props.smark.v)
				x1->props.smark = x->props.smark;

			if (x->if_id)
				x1->if_id = x->if_id;

			__xfrm_state_bump_genids(x1);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		}

		err = 0;
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
	}

fail:
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	xfrm_dev_state_update_stats(x);

	if (!READ_ONCE(x->curlft.use_time))
		WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

void xfrm_state_update_stats(struct net *net)
{
	struct xfrm_state *x;
	int i;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
			xfrm_dev_state_update_stats(x);
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}

struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
		  u8 proto, unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct xfrm_state *x;

	rcu_read_lock();
	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
	      u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
	      const xfrm_address_t *saddr, int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
			    proto, daddr, saddr, create);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
#if IS_ENABLED(CONFIG_IPV6)
/* distribution counting sort function for xfrm_state and xfrm_tmpl */
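/* For example, with classes cmp() = {1, 4, 1, 2} and maxclass = 5,
 * the per-class counts are {1:2, 2:1, 3:0, 4:1}; after the prefix
 * sums, class c starts writing at offset count[c - 1], so dst ends
 * up ordered {1, 1, 2, 4} while equal classes keep their relative
 * order (the sort is stable).
 */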
static void
__xfrm6_sort(void **dst, void **src, int n,
	     int (*cmp)(const void *p), int maxclass)
{
	int count[XFRM_MAX_DEPTH] = { };
	int class[XFRM_MAX_DEPTH];
	int i;

	for (i = 0; i < n; i++) {
		int c = cmp(src[i]);

		class[i] = c;
		count[c]++;
	}

	for (i = 2; i < maxclass; i++)
		count[i] += count[i - 1];

	for (i = 0; i < n; i++) {
		dst[count[class[i] - 1]++] = src[i];
		src[i] = NULL;
	}
}

/* Rule for xfrm_state:
 *
 * rule 1: select IPsec transport except AH
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec transport AH
 * rule 4: select IPsec tunnel
 * rule 5: others
 */
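/*
 * For example, states {tunnel ESP, transport AH, transport ESP}
 * classify as {4, 3, 1} and therefore sort to
 * {transport ESP, transport AH, tunnel ESP}.
 */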
static int __xfrm6_state_sort_cmp(const void *p)
{
	const struct xfrm_state *v = p;

	switch (v->props.mode) {
	case XFRM_MODE_TRANSPORT:
		if (v->id.proto != IPPROTO_AH)
			return 1;
		else
			return 3;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		return 2;
#endif
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_BEET:
		return 4;
	}
	return 5;
}

/* Rule for xfrm_tmpl:
 *
 * rule 1: select IPsec transport
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec tunnel
 * rule 4: others
 */
static int __xfrm6_tmpl_sort_cmp(const void *p)
{
	const struct xfrm_tmpl *v = p;

	switch (v->mode) {
	case XFRM_MODE_TRANSPORT:
		return 1;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		return 2;
#endif
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_BEET:
		return 3;
	}
	return 4;
}
#else
static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }

static inline void
__xfrm6_sort(void **dst, void **src, int n,
	     int (*cmp)(const void *p), int maxclass)
{
	int i;

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}
#endif /* CONFIG_IPV6 */

void
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int i;

	if (family == AF_INET6)
		__xfrm6_sort((void **)dst, (void **)src, n,
			     __xfrm6_tmpl_sort_cmp, 5);
	else
		for (i = 0; i < n; i++)
			dst[i] = src[i];
}

void
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int i;

	if (family == AF_INET6)
		__xfrm6_sort((void **)dst, (void **)src, n,
			     __xfrm6_state_sort_cmp, 6);
	else
		for (i = 0; i < n; i++)
			dst[i] = src[i];
}
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
	unsigned int h = xfrm_seq_hash(net, seq);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
		if (x->km.seq == seq &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->pcpu_num == pcpu_num &&
		    x->km.state == XFRM_STATE_ACQ) {
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static atomic_t acqseq;

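	/* Zero is never handed out: an acquire sequence number of 0
	 * means "no sequence number", so skip the wrap-around value.
	 */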
	do {
		res = atomic_inc_return(&acqseq);
	} while (!res);

	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP SPIs are only 16 bits wide. */
		if (max >= 0x10000) {
			NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
			return -EINVAL;
		}
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
		return -EINVAL;
	}

	if (min > max) {
		NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(verify_spi_info);

int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
		   struct netlink_ext_ack *extack)
{
	struct net *net = xs_net(x);
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);
	__be32 newspi = 0;
	u32 mark = x->mark.v & x->mark.m;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
		goto unlock;
	}

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
			xfrm_state_put(x0);
			goto unlock;
		}
		newspi = minspi;
	} else {
		u32 spi = 0;
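		/* Probe at most (high - low + 1) random SPIs in the
		 * requested range and take the first one not already in
		 * use for this mark/daddr/proto.  Since probes may
		 * repeat, a crowded range can fail even though a free
		 * SPI still exists.
		 */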
		for (h = 0; h < high-low+1; h++) {
			spi = get_random_u32_inclusive(low, high);
			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				newspi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (newspi) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		x->id.spi = newspi;
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
				  x->xso.type);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

		err = 0;
	} else {
		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);

static bool __xfrm_state_filter_match(struct xfrm_state *x,
				      struct xfrm_address_filter *filter)
{
	if (filter) {
		if ((filter->family == AF_INET ||
		     filter->family == AF_INET6) &&
		    x->props.family != filter->family)
			return false;

		return addr_match(&x->props.saddr, &filter->saddr,
				  filter->splen) &&
		       addr_match(&x->id.daddr, &filter->daddr,
				  filter->dplen);
	}
	return true;
}

int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

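	/* The walk structure doubles as a cursor: while a dump is in
	 * progress it stays linked into net->xfrm.state_all, so a walk
	 * that was interrupted (func returned an error, e.g. out of
	 * buffer space) resumes right after the last state delivered.
	 */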
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		if (!__xfrm_state_filter_match(state, walk->filter))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
			  struct xfrm_address_filter *filter)
{
	INIT_LIST_HEAD(&walk->all);
	walk->proto = proto;
	walk->state = XFRM_STATE_DEAD;
	walk->seq = 0;
	walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);

void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
	kfree(walk->filter);

	if (list_empty(&walk->all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_del(&walk->all);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);

static void xfrm_replay_timer_handler(struct timer_list *t)
{
	struct xfrm_state *x = from_timer(x, t, rtimer);

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on(xs_net(x)))
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

static LIST_HEAD(xfrm_km_list);

void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	rcu_read_unlock();
}

void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_mgr *km;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	rcu_read_unlock();
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol);
		if (!acqret)
			err = acqret;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_query);

static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	rcu_read_unlock();
	return err;
}

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int ret = 0;

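	/* With mapping_maxage set, notifications are rate-limited: the
	 * key manager only hears about a NAT mapping if the source port
	 * actually changed or the last notification is more than
	 * mapping_maxage seconds old.
	 */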
	if (x->mapping_maxage) {
		if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
		    x->new_mapping_sport != sport) {
			x->new_mapping_sport = sport;
			x->new_mapping = jiffies / HZ;
			ret = __km_new_mapping(x, ipaddr, sport);
		}
	} else {
		ret = __km_new_mapping(x, ipaddr, sport);
	}

	return ret;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_migrate,
	       const struct xfrm_kmaddress *k,
	       const struct xfrm_encap_tmpl *encap)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k,
					  encap);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(net, proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_report);

static bool km_is_alive(const struct km_event *c)
{
	struct xfrm_mgr *km;
	bool is_alive = false;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->is_alive && km->is_alive(c)) {
			is_alive = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_alive;
}

#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
static DEFINE_SPINLOCK(xfrm_translator_lock);
static struct xfrm_translator __rcu *xfrm_translator;

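/* At most one translator can be registered at a time; in practice
 * this is the xfrm compat layer, which converts 32-bit userspace
 * netlink messages to the native 64-bit layout.
 */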
struct xfrm_translator *xfrm_get_translator(void)
{
	struct xfrm_translator *xtr;

	rcu_read_lock();
	xtr = rcu_dereference(xfrm_translator);
	if (unlikely(!xtr))
		goto out;
	if (!try_module_get(xtr->owner))
		xtr = NULL;
out:
	rcu_read_unlock();
	return xtr;
}
EXPORT_SYMBOL_GPL(xfrm_get_translator);

void xfrm_put_translator(struct xfrm_translator *xtr)
{
	module_put(xtr->owner);
}
EXPORT_SYMBOL_GPL(xfrm_put_translator);

int xfrm_register_translator(struct xfrm_translator *xtr)
{
	int err = 0;

	spin_lock_bh(&xfrm_translator_lock);
	if (unlikely(xfrm_translator != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_translator, xtr);
	spin_unlock_bh(&xfrm_translator_lock);

	return err;
}
EXPORT_SYMBOL_GPL(xfrm_register_translator);

int xfrm_unregister_translator(struct xfrm_translator *xtr)
{
	int err = 0;

	spin_lock_bh(&xfrm_translator_lock);
	if (likely(xfrm_translator != NULL)) {
		if (rcu_access_pointer(xfrm_translator) != xtr)
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_translator, NULL);
	}
	spin_unlock_bh(&xfrm_translator_lock);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
#endif

int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (sockptr_is_null(optval) && !optlen) {
		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
		__sk_dst_reset(sk);
		return 0;
	}

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = memdup_sockptr(optval, optlen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Use the 64-bit / untranslated format on Android, even for compat */
	if (!IS_ENABLED(CONFIG_GKI_NET_XFRM_HACKS) && in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr) {
			kfree(data);
			return -EOPNOTSUPP;
		}

		err = xtr->xlate_user_policy_sockptr(&data, optlen);
		xfrm_put_translator(xtr);
		if (err) {
			kfree(data);
			return err;
		}
	}

	err = -EINVAL;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	rcu_read_unlock();

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		__sk_dst_reset(sk);
		err = 0;
	}

	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

static DEFINE_SPINLOCK(xfrm_km_lock);

void xfrm_register_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_add_tail_rcu(&km->list, &xfrm_km_list);
	spin_unlock_bh(&xfrm_km_lock);
}
EXPORT_SYMBOL(xfrm_register_km);

void xfrm_unregister_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_del_rcu(&km->list);
	spin_unlock_bh(&xfrm_km_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0, family = afinfo->family;

	if (WARN_ON(family >= NPROTO))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
{
	if (unlikely(family >= NPROTO))
		return NULL;

	return rcu_dereference(xfrm_state_afinfo[family]);
}
EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

void xfrm_flush_gc(void)
{
	flush_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(xfrm_flush_gc);

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put_sync(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	const struct xfrm_type *type = READ_ONCE(x->type);
	struct crypto_aead *aead;
	u32 blksize, net_adj = 0;

	if (x->km.state != XFRM_STATE_VALID ||
	    !type || type->proto != IPPROTO_ESP)
		return mtu - x->props.header_len;

	aead = x->data;
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		if (x->props.family == AF_INET)
			net_adj = sizeof(struct iphdr);
		else if (x->props.family == AF_INET6)
			net_adj = sizeof(struct ipv6hdr);
		break;
	case XFRM_MODE_TUNNEL:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

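	/* Round the ESP payload down to the cipher block size, then add
	 * back the network header that rides inside the payload in
	 * transport/BEET mode.  The final "- 2" leaves room for the ESP
	 * trailer's pad-length and next-header bytes.  Illustrative
	 * numbers: mtu = 1500, header_len = 24, authsize = 12,
	 * net_adj = 20 (IPv4 transport), blksize = 16:
	 * (1500 - 24 - 12 - 20) & ~15 = 1440, + 20 - 2 = 1458.
	 */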
	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);

int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
		      struct netlink_ext_ack *extack)
{
	const struct xfrm_mode *inner_mode;
	const struct xfrm_mode *outer_mode;
	int family = x->props.family;
	int err;

	if (family == AF_INET &&
	    READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
		x->props.flags |= XFRM_STATE_NOPMTUDISC;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL) {
			NL_SET_ERR_MSG(extack, "Requested mode not found");
			goto error;
		}

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
			goto error;
		}

		x->inner_mode = *inner_mode;
	} else {
		const struct xfrm_mode *inner_mode_iaf;
		int iafamily = AF_INET;

		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
		if (inner_mode == NULL) {
			NL_SET_ERR_MSG(extack, "Requested mode not found");
			goto error;
		}

		x->inner_mode = *inner_mode;

		if (x->props.family == AF_INET)
			iafamily = AF_INET6;

		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
		if (inner_mode_iaf) {
			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
				x->inner_mode_iaf = *inner_mode_iaf;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL) {
		NL_SET_ERR_MSG(extack, "Requested type not found");
		goto error;
	}

	x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);

	err = x->type->init_state(x, extack);
	if (err)
		goto error;

	outer_mode = xfrm_get_mode(x->props.mode, family);
	if (!outer_mode) {
		NL_SET_ERR_MSG(extack, "Requested mode not found");
		err = -EPROTONOSUPPORT;
		goto error;
	}

	x->outer_mode = *outer_mode;
	if (init_replay) {
		err = xfrm_init_replay(x, extack);
		if (err)
			goto error;
	}

	if (x->nat_keepalive_interval) {
		if (x->dir != XFRM_SA_DIR_OUT) {
			NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
			err = -EINVAL;
			goto error;
		}

		if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG(extack,
				       "NAT keepalive is only supported for UDP encapsulation");
			err = -EINVAL;
			goto error;
		}
	}

error:
	return err;
}

EXPORT_SYMBOL(__xfrm_init_state);

int xfrm_init_state(struct xfrm_state *x)
{
	int err;

	err = __xfrm_init_state(x, true, false, NULL);
	if (!err)
		x->km.state = XFRM_STATE_VALID;

	return err;
}

EXPORT_SYMBOL(xfrm_init_state);

int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	if (net_eq(net, &init_net))
		xfrm_state_cache = KMEM_CACHE(xfrm_state,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	INIT_LIST_HEAD(&net->xfrm.state_all);

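	/* Each hash table starts with 8 buckets (state_hmask = 7);
	 * xfrm_hash_grow_check() schedules state_hash_work to grow the
	 * tables as the number of states increases, bounded by
	 * xfrm_state_hashmax.
	 */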
	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_byseq = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byseq)
		goto out_byseq;

	net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
	if (!net->xfrm.state_cache_input)
		goto out_state_cache_input;

	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
			       &net->xfrm.xfrm_state_lock);
	return 0;

out_state_cache_input:
	xfrm_hash_free(net->xfrm.state_byseq, sz);
out_byseq:
	xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}

void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
	flush_work(&xfrm_state_gc_work);
	xfrm_state_flush(net, 0, false, true);

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byseq));
	xfrm_hash_free(net->xfrm.state_byseq, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
	free_percpu(net->xfrm.state_cache_input);
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	const struct iphdr *iph4;
	const struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr, &iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

void xfrm_audit_state_replay(struct xfrm_state *x,
			     struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */