// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#ifndef __GENKSYMS__
#include <net/inet_ecn.h>
#endif
#include <net/xfrm.h>
#include <net/ip.h>
#ifndef __GENKSYMS__
#include <net/gre.h>
#endif
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN	((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX	((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; may be an empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |                  |
 * |                 |         xfrm_pol_inexact_node
 * |                 |                  |
 * |                 |                  + root: unused
 * |                 |                  |
 * |                 |                  + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority. If two results have the same priority, the
 * youngest one wins.
 */
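
/* Illustrative walk-through (hypothetical addresses): for a flow with
 * saddr 10.1.2.3 and daddr 203.0.113.9, the four candidate lists could
 * come from:
 *   1. bin->hhead:                 e.g. a 0.0.0.0/0 <-> 0.0.0.0/0 policy
 *   2. root_d node 203.0.113.0/24: e.g. an any-saddr -> 203.0.113.0/24 policy
 *   3. that node's saddr subtree:  e.g. a 10.1.0.0/16 -> 203.0.113.0/24 policy
 *   4. root_s node 10.1.0.0/16:    e.g. a 10.1.0.0/16 -> any-daddr policy
 * All four lists are then scanned for the best (lowest) priority match.
 */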

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
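
/* Usage sketch (hypothetical selector): a selector with prefixlen_d = 24,
 * proto = 0 and ifindex = 0 matches any flow whose daddr falls in the /24
 * of sel->daddr; a zero port mask likewise wildcards the port, since
 * !((port ^ sel->dport) & 0) is always true.
 */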

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
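
/* e.g. with HZ == 1000, make_jiffies(5) == 5000; second counts that would
 * exceed the schedulable range are clamped to MAX_SCHEDULE_TIMEOUT-1.
 */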

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
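
/* Worked example (hypothetical lifetimes): with add_time == T,
 * soft_add_expires_seconds == 3000 and hard_add_expires_seconds == 3600,
 * the timer fires around T+3000, sends a soft-expire notification
 * (km_policy_expired(..., hard=0)) and re-arms; around T+3600 the policy
 * is deleted and a hard-expire notification is sent instead.
 */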

/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from all lists
 * at this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
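
/* e.g. old_hmask 15 (16 buckets) yields ((15 + 1) << 1) - 1 == 31,
 * i.e. the hash table doubles to 32 buckets on each resize.
 */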

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}
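
/* For example, with the thresholds above an IPv4 selector with a /12
 * prefix (< INEXACT_PREFIXLEN_IPV4) is considered too coarse for the
 * rbtree and lands on a plain list, while a /24 prefix goes in the tree.
 */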

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
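
/* For instance (AF_INET, prefixlen 24): 10.0.0.1 and 10.0.0.200 compare
 * equal (delta == 0) because only the first 24 bits are examined, while
 * 10.0.1.0 sorts before 10.0.2.0 (delta == -1). This total order is what
 * the rbtree insert and lookup code below relies on.
 */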

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate a new index. KAME seems to generate them ordered by the cost
 * of an absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
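
/* Generated indices advance in steps of 8 with the direction OR'ed into
 * the low bits, so xfrm_policy_id2dir() can recover the direction from an
 * index; e.g. (hypothetically, assuming no collisions) the third generated
 * XFRM_POLICY_OUT policy gets index 16 | XFRM_POLICY_OUT.
 */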

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
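
/* Note: this is a word-wise equality test, not an ordering; it returns 0
 * only when the two selectors are identical word for word, so callers
 * test !selector_cmp(a, b) for equality.
 */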

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}
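
/* e.g. a policy configured with mark.v = 0x1, mark.m = 0xff only matches a
 * lookup that supplies the identical (v, m) pair; this differs from the
 * flow-time test in xfrm_policy_match() below, which checks
 * (fl->flowi_mark & pol->mark.m) == pol->mark.v.
 */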

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
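
/* The bin key packs type, dir and family into a single u32
 * (type << 24 | dir << 16 | family) and mixes in if_id and the netns hash
 * via jhash_3words(), so each (dir, type, family, if_id, netns) tuple maps
 * to exactly one inexact bin in the rhashtable.
 */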
1506
xfrm_policy_insert_inexact_list(struct hlist_head * chain,struct xfrm_policy * policy)1507 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1508 struct xfrm_policy *policy)
1509 {
1510 struct xfrm_policy *pol, *delpol = NULL;
1511 struct hlist_node *newpos = NULL;
1512 int i = 0;
1513
1514 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1515 if (pol->type == policy->type &&
1516 pol->if_id == policy->if_id &&
1517 !selector_cmp(&pol->selector, &policy->selector) &&
1518 xfrm_policy_mark_match(&policy->mark, pol) &&
1519 xfrm_sec_ctx_match(pol->security, policy->security) &&
1520 !WARN_ON(delpol)) {
1521 delpol = pol;
1522 if (policy->priority > pol->priority)
1523 continue;
1524 } else if (policy->priority >= pol->priority) {
1525 newpos = &pol->bydst_inexact_list;
1526 continue;
1527 }
1528 if (delpol)
1529 break;
1530 }
1531
1532 if (newpos)
1533 hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1534 else
1535 hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1536
1537 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1538 pol->pos = i;
1539 i++;
1540 }
1541 }
1542
xfrm_policy_insert_list(struct hlist_head * chain,struct xfrm_policy * policy,bool excl)1543 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1544 struct xfrm_policy *policy,
1545 bool excl)
1546 {
1547 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1548
1549 hlist_for_each_entry(pol, chain, bydst) {
1550 if (pol->type == policy->type &&
1551 pol->if_id == policy->if_id &&
1552 !selector_cmp(&pol->selector, &policy->selector) &&
1553 xfrm_policy_mark_match(&policy->mark, pol) &&
1554 xfrm_sec_ctx_match(pol->security, policy->security) &&
1555 !WARN_ON(delpol)) {
1556 if (excl)
1557 return ERR_PTR(-EEXIST);
1558 delpol = pol;
1559 if (policy->priority > pol->priority)
1560 continue;
1561 } else if (policy->priority >= pol->priority) {
1562 newpos = pol;
1563 continue;
1564 }
1565 if (delpol)
1566 break;
1567 }
1568
1569 if (newpos)
1570 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1571 else
1572 hlist_add_head_rcu(&policy->bydst, chain);
1573
1574 return delpol;
1575 }
1576
xfrm_policy_insert(int dir,struct xfrm_policy * policy,int excl)1577 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1578 {
1579 struct net *net = xp_net(policy);
1580 struct xfrm_policy *delpol;
1581 struct hlist_head *chain;
1582
1583 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1584 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1585 if (chain)
1586 delpol = xfrm_policy_insert_list(chain, policy, excl);
1587 else
1588 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1589
1590 if (IS_ERR(delpol)) {
1591 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1592 return PTR_ERR(delpol);
1593 }
1594
1595 __xfrm_policy_link(policy, dir);
1596
1597 /* After previous checking, family can either be AF_INET or AF_INET6 */
1598 if (policy->family == AF_INET)
1599 rt_genid_bump_ipv4(net);
1600 else
1601 rt_genid_bump_ipv6(net);
1602
1603 if (delpol) {
1604 xfrm_policy_requeue(delpol, policy);
1605 __xfrm_policy_unlink(delpol, dir);
1606 }
1607 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1608 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1609 policy->curlft.add_time = ktime_get_real_seconds();
1610 policy->curlft.use_time = 0;
1611 if (!mod_timer(&policy->timer, jiffies + HZ))
1612 xfrm_pol_hold(policy);
1613 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1614
1615 if (delpol)
1616 xfrm_policy_kill(delpol);
1617 else if (xfrm_bydst_should_resize(net, dir, NULL))
1618 schedule_work(&net->xfrm.policy_hash_work);
1619
1620 return 0;
1621 }
1622 EXPORT_SYMBOL(xfrm_policy_insert);
1623
1624 static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head * chain,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx)1625 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1626 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1627 struct xfrm_sec_ctx *ctx)
1628 {
1629 struct xfrm_policy *pol;
1630
1631 if (!chain)
1632 return NULL;
1633
1634 hlist_for_each_entry(pol, chain, bydst) {
1635 if (pol->type == type &&
1636 pol->if_id == if_id &&
1637 xfrm_policy_mark_match(mark, pol) &&
1638 !selector_cmp(sel, &pol->selector) &&
1639 xfrm_sec_ctx_match(ctx, pol->security))
1640 return pol;
1641 }
1642
1643 return NULL;
1644 }
1645
1646 struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx,int delete,int * err)1647 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1648 u8 type, int dir, struct xfrm_selector *sel,
1649 struct xfrm_sec_ctx *ctx, int delete, int *err)
1650 {
1651 struct xfrm_pol_inexact_bin *bin = NULL;
1652 struct xfrm_policy *pol, *ret = NULL;
1653 struct hlist_head *chain;
1654
1655 *err = 0;
1656 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1657 chain = policy_hash_bysel(net, sel, sel->family, dir);
1658 if (!chain) {
1659 struct xfrm_pol_inexact_candidates cand;
1660 int i;
1661
1662 bin = xfrm_policy_inexact_lookup(net, type,
1663 sel->family, dir, if_id);
1664 if (!bin) {
1665 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1666 return NULL;
1667 }
1668
1669 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1670 &sel->saddr,
1671 &sel->daddr)) {
1672 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1673 return NULL;
1674 }
1675
1676 pol = NULL;
1677 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1678 struct xfrm_policy *tmp;
1679
1680 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1681 if_id, type, dir,
1682 sel, ctx);
1683 if (!tmp)
1684 continue;
1685
1686 if (!pol || tmp->pos < pol->pos)
1687 pol = tmp;
1688 }
1689 } else {
1690 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1691 sel, ctx);
1692 }
1693
1694 if (pol) {
1695 xfrm_pol_hold(pol);
1696 if (delete) {
1697 *err = security_xfrm_policy_delete(pol->security);
1698 if (*err) {
1699 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1700 return pol;
1701 }
1702 __xfrm_policy_unlink(pol, dir);
1703 }
1704 ret = pol;
1705 }
1706 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1707
1708 if (ret && delete)
1709 xfrm_policy_kill(ret);
1710 if (bin && delete)
1711 xfrm_policy_inexact_prune_bin(bin);
1712 return ret;
1713 }
1714 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1715
1716 struct xfrm_policy *
xfrm_policy_byid(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,u32 id,int delete,int * err)1717 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1718 u8 type, int dir, u32 id, int delete, int *err)
1719 {
1720 struct xfrm_policy *pol, *ret;
1721 struct hlist_head *chain;
1722
1723 *err = -ENOENT;
1724 if (xfrm_policy_id2dir(id) != dir)
1725 return NULL;
1726
1727 *err = 0;
1728 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1729 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1730 ret = NULL;
1731 hlist_for_each_entry(pol, chain, byidx) {
1732 if (pol->type == type && pol->index == id &&
1733 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1734 xfrm_pol_hold(pol);
1735 if (delete) {
1736 *err = security_xfrm_policy_delete(
1737 pol->security);
1738 if (*err) {
1739 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1740 return pol;
1741 }
1742 __xfrm_policy_unlink(pol, dir);
1743 }
1744 ret = pol;
1745 break;
1746 }
1747 }
1748 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1749
1750 if (ret && delete)
1751 xfrm_policy_kill(ret);
1752 return ret;
1753 }
1754 EXPORT_SYMBOL(xfrm_policy_byid);
1755
1756 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1757 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1758 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1759 {
1760 struct xfrm_policy *pol;
1761 int err = 0;
1762
1763 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1764 if (pol->walk.dead ||
1765 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1766 pol->type != type)
1767 continue;
1768
1769 err = security_xfrm_policy_delete(pol->security);
1770 if (err) {
1771 xfrm_audit_policy_delete(pol, 0, task_valid);
1772 return err;
1773 }
1774 }
1775 return err;
1776 }
1777 #else
1778 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1779 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1780 {
1781 return 0;
1782 }
1783 #endif
1784
xfrm_policy_flush(struct net * net,u8 type,bool task_valid)1785 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1786 {
1787 int dir, err = 0, cnt = 0;
1788 struct xfrm_policy *pol;
1789
1790 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1791
1792 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1793 if (err)
1794 goto out;
1795
1796 again:
1797 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1798 dir = xfrm_policy_id2dir(pol->index);
1799 if (pol->walk.dead ||
1800 dir >= XFRM_POLICY_MAX ||
1801 pol->type != type)
1802 continue;
1803
1804 __xfrm_policy_unlink(pol, dir);
1805 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1806 cnt++;
1807 xfrm_audit_policy_delete(pol, 1, task_valid);
1808 xfrm_policy_kill(pol);
1809 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1810 goto again;
1811 }
1812 if (cnt)
1813 __xfrm_policy_inexact_flush(net);
1814 else
1815 err = -ESRCH;
1816 out:
1817 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1818 return err;
1819 }
1820 EXPORT_SYMBOL(xfrm_policy_flush);
1821
xfrm_policy_walk(struct net * net,struct xfrm_policy_walk * walk,int (* func)(struct xfrm_policy *,int,int,void *),void * data)1822 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1823 int (*func)(struct xfrm_policy *, int, int, void*),
1824 void *data)
1825 {
1826 struct xfrm_policy *pol;
1827 struct xfrm_policy_walk_entry *x;
1828 int error = 0;
1829
1830 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1831 walk->type != XFRM_POLICY_TYPE_ANY)
1832 return -EINVAL;
1833
1834 if (list_empty(&walk->walk.all) && walk->seq != 0)
1835 return 0;
1836
1837 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1838 if (list_empty(&walk->walk.all))
1839 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1840 else
1841 x = list_first_entry(&walk->walk.all,
1842 struct xfrm_policy_walk_entry, all);
1843
1844 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1845 if (x->dead)
1846 continue;
1847 pol = container_of(x, struct xfrm_policy, walk);
1848 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1849 walk->type != pol->type)
1850 continue;
1851 error = func(pol, xfrm_policy_id2dir(pol->index),
1852 walk->seq, data);
1853 if (error) {
1854 list_move_tail(&walk->walk.all, &x->all);
1855 goto out;
1856 }
1857 walk->seq++;
1858 }
1859 if (walk->seq == 0) {
1860 error = -ENOENT;
1861 goto out;
1862 }
1863 list_del_init(&walk->walk.all);
1864 out:
1865 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1866 return error;
1867 }
1868 EXPORT_SYMBOL(xfrm_policy_walk);
1869
xfrm_policy_walk_init(struct xfrm_policy_walk * walk,u8 type)1870 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1871 {
1872 INIT_LIST_HEAD(&walk->walk.all);
1873 walk->walk.dead = 1;
1874 walk->type = type;
1875 walk->seq = 0;
1876 }
1877 EXPORT_SYMBOL(xfrm_policy_walk_init);
1878
xfrm_policy_walk_done(struct xfrm_policy_walk * walk,struct net * net)1879 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1880 {
1881 if (list_empty(&walk->walk.all))
1882 return;
1883
1884 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1885 list_del(&walk->walk.all);
1886 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1887 }
1888 EXPORT_SYMBOL(xfrm_policy_walk_done);
1889
1890 /*
1891 * Find policy to apply to this flow.
1892 *
1893 * Returns 0 if policy found, else an -errno.
1894 */
xfrm_policy_match(const struct xfrm_policy * pol,const struct flowi * fl,u8 type,u16 family,int dir,u32 if_id)1895 static int xfrm_policy_match(const struct xfrm_policy *pol,
1896 const struct flowi *fl,
1897 u8 type, u16 family, int dir, u32 if_id)
1898 {
1899 const struct xfrm_selector *sel = &pol->selector;
1900 int ret = -ESRCH;
1901 bool match;
1902
1903 if (pol->family != family ||
1904 pol->if_id != if_id ||
1905 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1906 pol->type != type)
1907 return ret;
1908
1909 match = xfrm_selector_match(sel, fl, family);
1910 if (match)
1911 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1912 dir);
1913 return ret;
1914 }
1915
1916 static struct xfrm_pol_inexact_node *
1917 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1918 seqcount_spinlock_t *count,
1919 const xfrm_address_t *addr, u16 family)
1920 {
1921 const struct rb_node *parent;
1922 int seq;
1923
1924 again:
1925 seq = read_seqcount_begin(count);
1926
1927 parent = rcu_dereference_raw(r->rb_node);
1928 while (parent) {
1929 struct xfrm_pol_inexact_node *node;
1930 int delta;
1931
1932 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1933
1934 delta = xfrm_policy_addr_delta(addr, &node->addr,
1935 node->prefixlen, family);
1936 if (delta < 0) {
1937 parent = rcu_dereference_raw(parent->rb_left);
1938 continue;
1939 } else if (delta > 0) {
1940 parent = rcu_dereference_raw(parent->rb_right);
1941 continue;
1942 }
1943
1944 return node;
1945 }
1946
1947 if (read_seqcount_retry(count, seq))
1948 goto again;
1949
1950 return NULL;
1951 }
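/*
 * Illustrative example for the prefix-based comparison above (assuming the
 * usual xfrm_policy_addr_delta() semantics, defined earlier in this file):
 * for a node holding 10.0.0.0 with prefixlen 16, a lookup for 10.0.1.5
 * compares only the leading 16 bits, so delta == 0 and the node matches,
 * while 10.1.0.0 differs within the prefix and descends left or right.
 */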
1952
1953 static bool
1954 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1955 struct xfrm_pol_inexact_bin *b,
1956 const xfrm_address_t *saddr,
1957 const xfrm_address_t *daddr)
1958 {
1959 struct xfrm_pol_inexact_node *n;
1960 u16 family;
1961
1962 if (!b)
1963 return false;
1964
1965 family = b->k.family;
1966 memset(cand, 0, sizeof(*cand));
1967 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1968
1969 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1970 family);
1971 if (n) {
1972 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1973 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1974 family);
1975 if (n)
1976 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1977 }
1978
1979 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1980 family);
1981 if (n)
1982 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1983
1984 return true;
1985 }
1986
1987 static struct xfrm_pol_inexact_bin *
1988 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1989 u8 dir, u32 if_id)
1990 {
1991 struct xfrm_pol_inexact_key k = {
1992 .family = family,
1993 .type = type,
1994 .dir = dir,
1995 .if_id = if_id,
1996 };
1997
1998 write_pnet(&k.net, net);
1999
2000 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2001 xfrm_pol_inexact_params);
2002 }
2003
2004 static struct xfrm_pol_inexact_bin *
2005 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2006 u8 dir, u32 if_id)
2007 {
2008 struct xfrm_pol_inexact_bin *bin;
2009
2010 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2011
2012 rcu_read_lock();
2013 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2014 rcu_read_unlock();
2015
2016 return bin;
2017 }
2018
2019 static struct xfrm_policy *
2020 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2021 struct xfrm_policy *prefer,
2022 const struct flowi *fl,
2023 u8 type, u16 family, int dir, u32 if_id)
2024 {
2025 u32 priority = prefer ? prefer->priority : ~0u;
2026 struct xfrm_policy *pol;
2027
2028 if (!chain)
2029 return NULL;
2030
2031 hlist_for_each_entry_rcu(pol, chain, bydst) {
2032 int err;
2033
2034 if (pol->priority > priority)
2035 break;
2036
2037 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2038 if (err) {
2039 if (err != -ESRCH)
2040 return ERR_PTR(err);
2041
2042 continue;
2043 }
2044
2045 if (prefer) {
2046 /* It matches. Is it older than *prefer? */
2047 if (pol->priority == priority &&
2048 prefer->pos < pol->pos)
2049 return prefer;
2050 }
2051
2052 return pol;
2053 }
2054
2055 return NULL;
2056 }
2057
2058 static struct xfrm_policy *
2059 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2060 struct xfrm_policy *prefer,
2061 const struct flowi *fl,
2062 u8 type, u16 family, int dir, u32 if_id)
2063 {
2064 struct xfrm_policy *tmp;
2065 int i;
2066
2067 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2068 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2069 prefer,
2070 fl, type, family, dir,
2071 if_id);
2072 if (!tmp)
2073 continue;
2074
2075 if (IS_ERR(tmp))
2076 return tmp;
2077 prefer = tmp;
2078 }
2079
2080 return prefer;
2081 }
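/*
 * Worked example of the preference rule above (illustrative values): with
 * prefer->priority == 10, prefer->pos == 3 and a candidate pol having
 * priority == 10, pos == 7, the candidate loses the tie-break because
 * prefer->pos < pol->pos, i.e. on equal priority the policy with the
 * smaller ->pos (the older entry) wins; a candidate with priority == 5
 * would win outright, since a lower priority value is preferred.
 */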
2082
2083 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2084 const struct flowi *fl,
2085 u16 family, u8 dir,
2086 u32 if_id)
2087 {
2088 struct xfrm_pol_inexact_candidates cand;
2089 const xfrm_address_t *daddr, *saddr;
2090 struct xfrm_pol_inexact_bin *bin;
2091 struct xfrm_policy *pol, *ret;
2092 struct hlist_head *chain;
2093 unsigned int sequence;
2094 int err;
2095
2096 daddr = xfrm_flowi_daddr(fl, family);
2097 saddr = xfrm_flowi_saddr(fl, family);
2098 if (unlikely(!daddr || !saddr))
2099 return NULL;
2100
2101 rcu_read_lock();
2102 retry:
2103 do {
2104 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2105 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2106 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2107
2108 ret = NULL;
2109 hlist_for_each_entry_rcu(pol, chain, bydst) {
2110 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2111 if (err) {
2112 if (err == -ESRCH)
2113 continue;
2114 else {
2115 ret = ERR_PTR(err);
2116 goto fail;
2117 }
2118 } else {
2119 ret = pol;
2120 break;
2121 }
2122 }
2123 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2124 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2125 daddr))
2126 goto skip_inexact;
2127
2128 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2129 family, dir, if_id);
2130 if (pol) {
2131 ret = pol;
2132 if (IS_ERR(pol))
2133 goto fail;
2134 }
2135
2136 skip_inexact:
2137 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2138 goto retry;
2139
2140 if (ret && !xfrm_pol_hold_rcu(ret))
2141 goto retry;
2142 fail:
2143 rcu_read_unlock();
2144
2145 return ret;
2146 }
2147
2148 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2149 const struct flowi *fl,
2150 u16 family, u8 dir, u32 if_id)
2151 {
2152 #ifdef CONFIG_XFRM_SUB_POLICY
2153 struct xfrm_policy *pol;
2154
2155 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2156 dir, if_id);
2157 if (pol != NULL)
2158 return pol;
2159 #endif
2160 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2161 dir, if_id);
2162 }
2163
2164 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2165 const struct flowi *fl,
2166 u16 family, u32 if_id)
2167 {
2168 struct xfrm_policy *pol;
2169
2170 rcu_read_lock();
2171 again:
2172 pol = rcu_dereference(sk->sk_policy[dir]);
2173 if (pol != NULL) {
2174 bool match;
2175 int err = 0;
2176
2177 if (pol->family != family) {
2178 pol = NULL;
2179 goto out;
2180 }
2181
2182 match = xfrm_selector_match(&pol->selector, fl, family);
2183 if (match) {
2184 if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2185 pol->if_id != if_id) {
2186 pol = NULL;
2187 goto out;
2188 }
2189 err = security_xfrm_policy_lookup(pol->security,
2190 fl->flowi_secid,
2191 dir);
2192 if (!err) {
2193 if (!xfrm_pol_hold_rcu(pol))
2194 goto again;
2195 } else if (err == -ESRCH) {
2196 pol = NULL;
2197 } else {
2198 pol = ERR_PTR(err);
2199 }
2200 } else
2201 pol = NULL;
2202 }
2203 out:
2204 rcu_read_unlock();
2205 return pol;
2206 }
2207
2208 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2209 {
2210 struct net *net = xp_net(pol);
2211
2212 list_add(&pol->walk.all, &net->xfrm.policy_all);
2213 net->xfrm.policy_count[dir]++;
2214 xfrm_pol_hold(pol);
2215 }
2216
2217 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2218 int dir)
2219 {
2220 struct net *net = xp_net(pol);
2221
2222 if (list_empty(&pol->walk.all))
2223 return NULL;
2224
2225 /* Socket policies are not hashed. */
2226 if (!hlist_unhashed(&pol->bydst)) {
2227 hlist_del_rcu(&pol->bydst);
2228 hlist_del_init(&pol->bydst_inexact_list);
2229 hlist_del(&pol->byidx);
2230 }
2231
2232 list_del_init(&pol->walk.all);
2233 net->xfrm.policy_count[dir]--;
2234
2235 return pol;
2236 }
2237
2238 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2239 {
2240 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2241 }
2242
2243 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2244 {
2245 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2246 }
2247
2248 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2249 {
2250 struct net *net = xp_net(pol);
2251
2252 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2253 pol = __xfrm_policy_unlink(pol, dir);
2254 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2255 if (pol) {
2256 xfrm_policy_kill(pol);
2257 return 0;
2258 }
2259 return -ENOENT;
2260 }
2261 EXPORT_SYMBOL(xfrm_policy_delete);
2262
2263 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2264 {
2265 struct net *net = sock_net(sk);
2266 struct xfrm_policy *old_pol;
2267
2268 #ifdef CONFIG_XFRM_SUB_POLICY
2269 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2270 return -EINVAL;
2271 #endif
2272
2273 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2274 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2275 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2276 if (pol) {
2277 pol->curlft.add_time = ktime_get_real_seconds();
2278 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2279 xfrm_sk_policy_link(pol, dir);
2280 }
2281 rcu_assign_pointer(sk->sk_policy[dir], pol);
2282 if (old_pol) {
2283 if (pol)
2284 xfrm_policy_requeue(old_pol, pol);
2285
2286 /* Unlinking always succeeds. This is the only function
2287 * allowed to delete or replace a socket policy.
2288 */
2289 xfrm_sk_policy_unlink(old_pol, dir);
2290 }
2291 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2292
2293 if (old_pol) {
2294 xfrm_policy_kill(old_pol);
2295 }
2296 return 0;
2297 }
2298
2299 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2300 {
2301 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2302 struct net *net = xp_net(old);
2303
2304 if (newp) {
2305 newp->selector = old->selector;
2306 if (security_xfrm_policy_clone(old->security,
2307 &newp->security)) {
2308 kfree(newp);
2309 return NULL; /* ENOMEM */
2310 }
2311 newp->lft = old->lft;
2312 newp->curlft = old->curlft;
2313 newp->mark = old->mark;
2314 newp->if_id = old->if_id;
2315 newp->action = old->action;
2316 newp->flags = old->flags;
2317 newp->xfrm_nr = old->xfrm_nr;
2318 newp->index = old->index;
2319 newp->type = old->type;
2320 newp->family = old->family;
2321 memcpy(newp->xfrm_vec, old->xfrm_vec,
2322 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2323 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2324 xfrm_sk_policy_link(newp, dir);
2325 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2326 xfrm_pol_put(newp);
2327 }
2328 return newp;
2329 }
2330
2331 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2332 {
2333 const struct xfrm_policy *p;
2334 struct xfrm_policy *np;
2335 int i, ret = 0;
2336
2337 rcu_read_lock();
2338 for (i = 0; i < 2; i++) {
2339 p = rcu_dereference(osk->sk_policy[i]);
2340 if (p) {
2341 np = clone_policy(p, i);
2342 if (unlikely(!np)) {
2343 ret = -ENOMEM;
2344 break;
2345 }
2346 rcu_assign_pointer(sk->sk_policy[i], np);
2347 }
2348 }
2349 rcu_read_unlock();
2350 return ret;
2351 }
2352
2353 static int
2354 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2355 xfrm_address_t *remote, unsigned short family, u32 mark)
2356 {
2357 int err;
2358 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2359
2360 if (unlikely(afinfo == NULL))
2361 return -EINVAL;
2362 err = afinfo->get_saddr(net, oif, local, remote, mark);
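/* xfrm_policy_get_afinfo() returned with the RCU read lock held,
 * hence the bare unlock below (same pattern in the callers further down).
 */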
2363 rcu_read_unlock();
2364 return err;
2365 }
2366
2367 /* Resolve list of templates for the flow, given policy. */
2368
2369 static int
2370 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2371 struct xfrm_state **xfrm, unsigned short family)
2372 {
2373 struct net *net = xp_net(policy);
2374 int nx;
2375 int i, error;
2376 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2377 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2378 xfrm_address_t tmp;
2379
2380 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2381 struct xfrm_state *x;
2382 xfrm_address_t *remote = daddr;
2383 xfrm_address_t *local = saddr;
2384 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2385
2386 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2387 tmpl->mode == XFRM_MODE_BEET) {
2388 remote = &tmpl->id.daddr;
2389 local = &tmpl->saddr;
2390 if (xfrm_addr_any(local, tmpl->encap_family)) {
2391 error = xfrm_get_saddr(net, fl->flowi_oif,
2392 &tmp, remote,
2393 tmpl->encap_family, 0);
2394 if (error)
2395 goto fail;
2396 local = &tmp;
2397 }
2398 }
2399
2400 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2401 family, policy->if_id);
2402
2403 if (x && x->km.state == XFRM_STATE_VALID) {
2404 xfrm[nx++] = x;
2405 daddr = remote;
2406 saddr = local;
2407 continue;
2408 }
2409 if (x) {
2410 error = (x->km.state == XFRM_STATE_ERROR ?
2411 -EINVAL : -EAGAIN);
2412 xfrm_state_put(x);
2413 } else if (error == -ESRCH) {
2414 error = -EAGAIN;
2415 }
2416
2417 if (!tmpl->optional)
2418 goto fail;
2419 }
2420 return nx;
2421
2422 fail:
2423 for (nx--; nx >= 0; nx--)
2424 xfrm_state_put(xfrm[nx]);
2425 return error;
2426 }
2427
2428 static int
2429 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2430 struct xfrm_state **xfrm, unsigned short family)
2431 {
2432 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2433 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2434 int cnx = 0;
2435 int error;
2436 int ret;
2437 int i;
2438
2439 for (i = 0; i < npols; i++) {
2440 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2441 error = -ENOBUFS;
2442 goto fail;
2443 }
2444
2445 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2446 if (ret < 0) {
2447 error = ret;
2448 goto fail;
2449 } else
2450 cnx += ret;
2451 }
2452
2453 /* found states are sorted for outbound processing */
2454 if (npols > 1)
2455 xfrm_state_sort(xfrm, tpp, cnx, family);
2456
2457 return cnx;
2458
2459 fail:
2460 for (cnx--; cnx >= 0; cnx--)
2461 xfrm_state_put(tpp[cnx]);
2462 return error;
2463
2464 }
2465
2466 static int xfrm_get_tos(const struct flowi *fl, int family)
2467 {
2468 if (family == AF_INET)
2469 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2470
2471 return 0;
2472 }
2473
2474 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2475 {
2476 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2477 struct dst_ops *dst_ops;
2478 struct xfrm_dst *xdst;
2479
2480 if (!afinfo)
2481 return ERR_PTR(-EINVAL);
2482
2483 switch (family) {
2484 case AF_INET:
2485 dst_ops = &net->xfrm.xfrm4_dst_ops;
2486 break;
2487 #if IS_ENABLED(CONFIG_IPV6)
2488 case AF_INET6:
2489 dst_ops = &net->xfrm.xfrm6_dst_ops;
2490 break;
2491 #endif
2492 default:
2493 BUG();
2494 }
2495 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2496
2497 if (likely(xdst)) {
2498 struct dst_entry *dst = &xdst->u.dst;
2499
2500 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2501 } else
2502 xdst = ERR_PTR(-ENOBUFS);
2503
2504 rcu_read_unlock();
2505
2506 return xdst;
2507 }
2508
2509 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2510 int nfheader_len)
2511 {
2512 if (dst->ops->family == AF_INET6) {
2513 struct rt6_info *rt = (struct rt6_info *)dst;
2514 path->path_cookie = rt6_get_cookie(rt);
2515 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2516 }
2517 }
2518
2519 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2520 const struct flowi *fl)
2521 {
2522 const struct xfrm_policy_afinfo *afinfo =
2523 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2524 int err;
2525
2526 if (!afinfo)
2527 return -EINVAL;
2528
2529 err = afinfo->fill_dst(xdst, dev, fl);
2530
2531 rcu_read_unlock();
2532
2533 return err;
2534 }
2535
2536
2537 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2538 * all the metrics... Shortly, bundle a bundle.
2539 */
2540
2541 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2542 struct xfrm_state **xfrm,
2543 struct xfrm_dst **bundle,
2544 int nx,
2545 const struct flowi *fl,
2546 struct dst_entry *dst)
2547 {
2548 const struct xfrm_state_afinfo *afinfo;
2549 const struct xfrm_mode *inner_mode;
2550 struct net *net = xp_net(policy);
2551 unsigned long now = jiffies;
2552 struct net_device *dev;
2553 struct xfrm_dst *xdst_prev = NULL;
2554 struct xfrm_dst *xdst0 = NULL;
2555 int i = 0;
2556 int err;
2557 int header_len = 0;
2558 int nfheader_len = 0;
2559 int trailer_len = 0;
2560 int tos;
2561 int family = policy->selector.family;
2562 xfrm_address_t saddr, daddr;
2563
2564 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2565
2566 tos = xfrm_get_tos(fl, family);
2567
2568 dst_hold(dst);
2569
2570 for (; i < nx; i++) {
2571 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2572 struct dst_entry *dst1 = &xdst->u.dst;
2573
2574 err = PTR_ERR(xdst);
2575 if (IS_ERR(xdst)) {
2576 dst_release(dst);
2577 goto put_states;
2578 }
2579
2580 bundle[i] = xdst;
2581 if (!xdst_prev)
2582 xdst0 = xdst;
2583 else
2584 /* Ref count is taken during xfrm_alloc_dst();
2585 * no need to do dst_clone() on dst1.
2586 */
2587 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2588
2589 if (xfrm[i]->sel.family == AF_UNSPEC) {
2590 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2591 xfrm_af2proto(family));
2592 if (!inner_mode) {
2593 err = -EAFNOSUPPORT;
2594 dst_release(dst);
2595 goto put_states;
2596 }
2597 } else
2598 inner_mode = &xfrm[i]->inner_mode;
2599
2600 xdst->route = dst;
2601 dst_copy_metrics(dst1, dst);
2602
2603 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2604 __u32 mark = 0;
2605
2606 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2607 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2608
2609 family = xfrm[i]->props.family;
2610 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2611 &saddr, &daddr, family, mark);
2612 err = PTR_ERR(dst);
2613 if (IS_ERR(dst))
2614 goto put_states;
2615 } else
2616 dst_hold(dst);
2617
2618 dst1->xfrm = xfrm[i];
2619 xdst->xfrm_genid = xfrm[i]->genid;
2620
2621 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2622 dst1->lastuse = now;
2623
2624 dst1->input = dst_discard;
2625
2626 rcu_read_lock();
2627 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2628 if (likely(afinfo))
2629 dst1->output = afinfo->output;
2630 else
2631 dst1->output = dst_discard_out;
2632 rcu_read_unlock();
2633
2634 xdst_prev = xdst;
2635
2636 header_len += xfrm[i]->props.header_len;
2637 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2638 nfheader_len += xfrm[i]->props.header_len;
2639 trailer_len += xfrm[i]->props.trailer_len;
2640 }
2641
2642 xfrm_dst_set_child(xdst_prev, dst);
2643 xdst0->path = dst;
2644
2645 err = -ENODEV;
2646 dev = dst->dev;
2647 if (!dev)
2648 goto free_dst;
2649
2650 xfrm_init_path(xdst0, dst, nfheader_len);
2651 xfrm_init_pmtu(bundle, nx);
2652
2653 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2654 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2655 err = xfrm_fill_dst(xdst_prev, dev, fl);
2656 if (err)
2657 goto free_dst;
2658
2659 xdst_prev->u.dst.header_len = header_len;
2660 xdst_prev->u.dst.trailer_len = trailer_len;
2661 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2662 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2663 }
2664
2665 return &xdst0->u.dst;
2666
2667 put_states:
2668 for (; i < nx; i++)
2669 xfrm_state_put(xfrm[i]);
2670 free_dst:
2671 if (xdst0)
2672 dst_release_immediate(&xdst0->u.dst);
2673
2674 return ERR_PTR(err);
2675 }
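/*
 * Illustrative shape of the bundle built above for nx == 2 (a sketch, not
 * part of the original file); dst_output() starts at the head of the chain:
 *
 *   xdst0 (dst1->xfrm = xfrm[0]) --child--> xdst1 (xfrm[1]) --child--> dst
 *      |                                       |               (plain route)
 *      +-- path == dst, route                  +-- route
 *
 * header_len/trailer_len start as the sum of all states' overhead at the
 * head and shrink by each state's contribution as the fill loop walks
 * towards the plain route.
 */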
2676
2677 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2678 struct xfrm_policy **pols,
2679 int *num_pols, int *num_xfrms)
2680 {
2681 int i;
2682
2683 if (*num_pols == 0 || !pols[0]) {
2684 *num_pols = 0;
2685 *num_xfrms = 0;
2686 return 0;
2687 }
2688 if (IS_ERR(pols[0])) {
2689 *num_pols = 0;
2690 return PTR_ERR(pols[0]);
2691 }
2692
2693 *num_xfrms = pols[0]->xfrm_nr;
2694
2695 #ifdef CONFIG_XFRM_SUB_POLICY
2696 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2697 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2698 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2699 XFRM_POLICY_TYPE_MAIN,
2700 fl, family,
2701 XFRM_POLICY_OUT,
2702 pols[0]->if_id);
2703 if (pols[1]) {
2704 if (IS_ERR(pols[1])) {
2705 xfrm_pols_put(pols, *num_pols);
2706 *num_pols = 0;
2707 return PTR_ERR(pols[1]);
2708 }
2709 (*num_pols)++;
2710 (*num_xfrms) += pols[1]->xfrm_nr;
2711 }
2712 }
2713 #endif
2714 for (i = 0; i < *num_pols; i++) {
2715 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2716 *num_xfrms = -1;
2717 break;
2718 }
2719 }
2720
2721 return 0;
2722
2723 }
2724
2725 static struct xfrm_dst *
2726 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2727 const struct flowi *fl, u16 family,
2728 struct dst_entry *dst_orig)
2729 {
2730 struct net *net = xp_net(pols[0]);
2731 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2732 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2733 struct xfrm_dst *xdst;
2734 struct dst_entry *dst;
2735 int err;
2736
2737 /* Try to instantiate a bundle */
2738 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2739 if (err <= 0) {
2740 if (err == 0)
2741 return NULL;
2742
2743 if (err != -EAGAIN)
2744 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2745 return ERR_PTR(err);
2746 }
2747
2748 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2749 if (IS_ERR(dst)) {
2750 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2751 return ERR_CAST(dst);
2752 }
2753
2754 xdst = (struct xfrm_dst *)dst;
2755 xdst->num_xfrms = err;
2756 xdst->num_pols = num_pols;
2757 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2758 xdst->policy_genid = atomic_read(&pols[0]->genid);
2759
2760 return xdst;
2761 }
2762
2763 static void xfrm_policy_queue_process(struct timer_list *t)
2764 {
2765 struct sk_buff *skb;
2766 struct sock *sk;
2767 struct dst_entry *dst;
2768 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2769 struct net *net = xp_net(pol);
2770 struct xfrm_policy_queue *pq = &pol->polq;
2771 struct flowi fl;
2772 struct sk_buff_head list;
2773 __u32 skb_mark;
2774
2775 spin_lock(&pq->hold_queue.lock);
2776 skb = skb_peek(&pq->hold_queue);
2777 if (!skb) {
2778 spin_unlock(&pq->hold_queue.lock);
2779 goto out;
2780 }
2781 dst = skb_dst(skb);
2782 sk = skb->sk;
2783
2784 /* Fixup the mark to support VTI. */
2785 skb_mark = skb->mark;
2786 skb->mark = pol->mark.v;
2787 xfrm_decode_session(skb, &fl, dst->ops->family);
2788 skb->mark = skb_mark;
2789 spin_unlock(&pq->hold_queue.lock);
2790
2791 dst_hold(xfrm_dst_path(dst));
2792 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2793 if (IS_ERR(dst))
2794 goto purge_queue;
2795
2796 if (dst->flags & DST_XFRM_QUEUE) {
2797 dst_release(dst);
2798
2799 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2800 goto purge_queue;
2801
2802 pq->timeout = pq->timeout << 1;
2803 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2804 xfrm_pol_hold(pol);
2805 goto out;
2806 }
2807
2808 dst_release(dst);
2809
2810 __skb_queue_head_init(&list);
2811
2812 spin_lock(&pq->hold_queue.lock);
2813 pq->timeout = 0;
2814 skb_queue_splice_init(&pq->hold_queue, &list);
2815 spin_unlock(&pq->hold_queue.lock);
2816
2817 while (!skb_queue_empty(&list)) {
2818 skb = __skb_dequeue(&list);
2819
2820 /* Fixup the mark to support VTI. */
2821 skb_mark = skb->mark;
2822 skb->mark = pol->mark.v;
2823 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2824 skb->mark = skb_mark;
2825
2826 dst_hold(xfrm_dst_path(skb_dst(skb)));
2827 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2828 if (IS_ERR(dst)) {
2829 kfree_skb(skb);
2830 continue;
2831 }
2832
2833 nf_reset_ct(skb);
2834 skb_dst_drop(skb);
2835 skb_dst_set(skb, dst);
2836
2837 dst_output(net, skb->sk, skb);
2838 }
2839
2840 out:
2841 xfrm_pol_put(pol);
2842 return;
2843
2844 purge_queue:
2845 pq->timeout = 0;
2846 skb_queue_purge(&pq->hold_queue);
2847 xfrm_pol_put(pol);
2848 }
2849
2850 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2851 {
2852 unsigned long sched_next;
2853 struct dst_entry *dst = skb_dst(skb);
2854 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2855 struct xfrm_policy *pol = xdst->pols[0];
2856 struct xfrm_policy_queue *pq = &pol->polq;
2857
2858 if (unlikely(skb_fclone_busy(sk, skb))) {
2859 kfree_skb(skb);
2860 return 0;
2861 }
2862
2863 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2864 kfree_skb(skb);
2865 return -EAGAIN;
2866 }
2867
2868 skb_dst_force(skb);
2869
2870 spin_lock_bh(&pq->hold_queue.lock);
2871
2872 if (!pq->timeout)
2873 pq->timeout = XFRM_QUEUE_TMO_MIN;
2874
2875 sched_next = jiffies + pq->timeout;
2876
2877 if (del_timer(&pq->hold_timer)) {
2878 if (time_before(pq->hold_timer.expires, sched_next))
2879 sched_next = pq->hold_timer.expires;
2880 xfrm_pol_put(pol);
2881 }
2882
2883 __skb_queue_tail(&pq->hold_queue, skb);
2884 if (!mod_timer(&pq->hold_timer, sched_next))
2885 xfrm_pol_hold(pol);
2886
2887 spin_unlock_bh(&pq->hold_queue.lock);
2888
2889 return 0;
2890 }
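/*
 * Worked example of the hold-queue backoff implemented by the two functions
 * above (HZ == 1000 assumed purely for illustration): xdst_queue_output()
 * arms the timer at XFRM_QUEUE_TMO_MIN (HZ/10, i.e. 100ms); each run of
 * xfrm_policy_queue_process() that still resolves to a DST_XFRM_QUEUE
 * route doubles pq->timeout: 100ms -> 200ms -> 400ms -> ...  Once
 * pq->timeout reaches XFRM_QUEUE_TMO_MAX (60*HZ) the queue is purged
 * instead of being requeued.
 */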
2891
2892 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2893 struct xfrm_flo *xflo,
2894 const struct flowi *fl,
2895 int num_xfrms,
2896 u16 family)
2897 {
2898 int err;
2899 struct net_device *dev;
2900 struct dst_entry *dst;
2901 struct dst_entry *dst1;
2902 struct xfrm_dst *xdst;
2903
2904 xdst = xfrm_alloc_dst(net, family);
2905 if (IS_ERR(xdst))
2906 return xdst;
2907
2908 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2909 net->xfrm.sysctl_larval_drop ||
2910 num_xfrms <= 0)
2911 return xdst;
2912
2913 dst = xflo->dst_orig;
2914 dst1 = &xdst->u.dst;
2915 dst_hold(dst);
2916 xdst->route = dst;
2917
2918 dst_copy_metrics(dst1, dst);
2919
2920 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2921 dst1->flags |= DST_XFRM_QUEUE;
2922 dst1->lastuse = jiffies;
2923
2924 dst1->input = dst_discard;
2925 dst1->output = xdst_queue_output;
2926
2927 dst_hold(dst);
2928 xfrm_dst_set_child(xdst, dst);
2929 xdst->path = dst;
2930
2931 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2932
2933 err = -ENODEV;
2934 dev = dst->dev;
2935 if (!dev)
2936 goto free_dst;
2937
2938 err = xfrm_fill_dst(xdst, dev, fl);
2939 if (err)
2940 goto free_dst;
2941
2942 out:
2943 return xdst;
2944
2945 free_dst:
2946 dst_release(dst1);
2947 xdst = ERR_PTR(err);
2948 goto out;
2949 }
2950
2951 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2952 const struct flowi *fl,
2953 u16 family, u8 dir,
2954 struct xfrm_flo *xflo, u32 if_id)
2955 {
2956 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2957 int num_pols = 0, num_xfrms = 0, err;
2958 struct xfrm_dst *xdst;
2959
2960 /* Resolve policies to use if we couldn't get them from
2961 * previous cache entry */
2962 num_pols = 1;
2963 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2964 err = xfrm_expand_policies(fl, family, pols,
2965 &num_pols, &num_xfrms);
2966 if (err < 0)
2967 goto inc_error;
2968 if (num_pols == 0)
2969 return NULL;
2970 if (num_xfrms <= 0)
2971 goto make_dummy_bundle;
2972
2973 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2974 xflo->dst_orig);
2975 if (IS_ERR(xdst)) {
2976 err = PTR_ERR(xdst);
2977 if (err == -EREMOTE) {
2978 xfrm_pols_put(pols, num_pols);
2979 return NULL;
2980 }
2981
2982 if (err != -EAGAIN)
2983 goto error;
2984 goto make_dummy_bundle;
2985 } else if (xdst == NULL) {
2986 num_xfrms = 0;
2987 goto make_dummy_bundle;
2988 }
2989
2990 return xdst;
2991
2992 make_dummy_bundle:
2993 /* We found policies, but there are no bundles to instantiate:
2994 * either the policy blocks, it has no transformations, or we
2995 * could not build a template (no xfrm_states). */
2996 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2997 if (IS_ERR(xdst)) {
2998 xfrm_pols_put(pols, num_pols);
2999 return ERR_CAST(xdst);
3000 }
3001 xdst->num_pols = num_pols;
3002 xdst->num_xfrms = num_xfrms;
3003 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3004
3005 return xdst;
3006
3007 inc_error:
3008 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3009 error:
3010 xfrm_pols_put(pols, num_pols);
3011 return ERR_PTR(err);
3012 }
3013
3014 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3015 struct dst_entry *dst_orig)
3016 {
3017 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3018 struct dst_entry *ret;
3019
3020 if (!afinfo) {
3021 dst_release(dst_orig);
3022 return ERR_PTR(-EINVAL);
3023 } else {
3024 ret = afinfo->blackhole_route(net, dst_orig);
3025 }
3026 rcu_read_unlock();
3027
3028 return ret;
3029 }
3030
3031 /* Finds/creates a bundle for given flow and if_id
3032 *
3033 * At the moment we eat a raw IP route. Mostly to speed up lookups
3034 * on interfaces with disabled IPsec.
3035 *
3036 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3037 * compatibility
3038 */
3039 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3040 struct dst_entry *dst_orig,
3041 const struct flowi *fl,
3042 const struct sock *sk,
3043 int flags, u32 if_id)
3044 {
3045 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3046 struct xfrm_dst *xdst;
3047 struct dst_entry *dst, *route;
3048 u16 family = dst_orig->ops->family;
3049 u8 dir = XFRM_POLICY_OUT;
3050 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3051
3052 dst = NULL;
3053 xdst = NULL;
3054 route = NULL;
3055
3056 sk = sk_const_to_full_sk(sk);
3057 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3058 num_pols = 1;
3059 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3060 if_id);
3061 err = xfrm_expand_policies(fl, family, pols,
3062 &num_pols, &num_xfrms);
3063 if (err < 0)
3064 goto dropdst;
3065
3066 if (num_pols) {
3067 if (num_xfrms <= 0) {
3068 drop_pols = num_pols;
3069 goto no_transform;
3070 }
3071
3072 xdst = xfrm_resolve_and_create_bundle(
3073 pols, num_pols, fl,
3074 family, dst_orig);
3075
3076 if (IS_ERR(xdst)) {
3077 xfrm_pols_put(pols, num_pols);
3078 err = PTR_ERR(xdst);
3079 if (err == -EREMOTE)
3080 goto nopol;
3081
3082 goto dropdst;
3083 } else if (xdst == NULL) {
3084 num_xfrms = 0;
3085 drop_pols = num_pols;
3086 goto no_transform;
3087 }
3088
3089 route = xdst->route;
3090 }
3091 }
3092
3093 if (xdst == NULL) {
3094 struct xfrm_flo xflo;
3095
3096 xflo.dst_orig = dst_orig;
3097 xflo.flags = flags;
3098
3099 /* To accelerate a bit... */
3100 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3101 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3102 goto nopol;
3103
3104 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3105 if (xdst == NULL)
3106 goto nopol;
3107 if (IS_ERR(xdst)) {
3108 err = PTR_ERR(xdst);
3109 goto dropdst;
3110 }
3111
3112 num_pols = xdst->num_pols;
3113 num_xfrms = xdst->num_xfrms;
3114 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3115 route = xdst->route;
3116 }
3117
3118 dst = &xdst->u.dst;
3119 if (route == NULL && num_xfrms > 0) {
3120 /* The only case when xfrm_bundle_lookup() returns a
3121 * bundle with a null route is when the template could
3122 * not be resolved. It means policies are there, but the
3123 * bundle could not be created, since we don't yet
3124 * have the xfrm_states. We need to wait for KM to
3125 * negotiate new SAs or bail out with an error. */
3126 if (net->xfrm.sysctl_larval_drop) {
3127 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3128 err = -EREMOTE;
3129 goto error;
3130 }
3131
3132 err = -EAGAIN;
3133
3134 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3135 goto error;
3136 }
3137
3138 no_transform:
3139 if (num_pols == 0)
3140 goto nopol;
3141
3142 if ((flags & XFRM_LOOKUP_ICMP) &&
3143 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3144 err = -ENOENT;
3145 goto error;
3146 }
3147
3148 for (i = 0; i < num_pols; i++)
3149 pols[i]->curlft.use_time = ktime_get_real_seconds();
3150
3151 if (num_xfrms < 0) {
3152 /* Prohibit the flow */
3153 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3154 err = -EPERM;
3155 goto error;
3156 } else if (num_xfrms > 0) {
3157 /* Flow transformed */
3158 dst_release(dst_orig);
3159 } else {
3160 /* Flow passes untransformed */
3161 dst_release(dst);
3162 dst = dst_orig;
3163 }
3164 ok:
3165 xfrm_pols_put(pols, drop_pols);
3166 if (dst && dst->xfrm &&
3167 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3168 dst->flags |= DST_XFRM_TUNNEL;
3169 return dst;
3170
3171 nopol:
3172 if (!(flags & XFRM_LOOKUP_ICMP)) {
3173 dst = dst_orig;
3174 goto ok;
3175 }
3176 err = -ENOENT;
3177 error:
3178 dst_release(dst);
3179 dropdst:
3180 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3181 dst_release(dst_orig);
3182 xfrm_pols_put(pols, drop_pols);
3183 return ERR_PTR(err);
3184 }
3185 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3186
3187 /* Main function: finds/creates a bundle for given flow.
3188 *
3189 * At the moment we eat a raw IP route. Mostly to speed up lookups
3190 * on interfaces with disabled IPsec.
3191 */
3192 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3193 const struct flowi *fl, const struct sock *sk,
3194 int flags)
3195 {
3196 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3197 }
3198 EXPORT_SYMBOL(xfrm_lookup);
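/*
 * A minimal caller sketch for xfrm_lookup() (illustrative; the IPv4 route
 * and flow arguments are assumptions for the example, not taken from this
 * file):
 */
#if 0 /* illustrative sketch, not built */
static struct dst_entry *route_with_ipsec(struct net *net, struct rtable *rt,
					  struct flowi4 *fl4, struct sock *sk)
{
	/* On a policy match the returned dst is the head of an xfrm bundle;
	 * otherwise the original route is handed back (or an ERR_PTR).
	 */
	return xfrm_lookup(net, &rt->dst, flowi4_to_flowi(fl4), sk, 0);
}
#endif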
3199
3200 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3201 * Otherwise we may send out blackholed packets.
3202 */
3203 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3204 const struct flowi *fl,
3205 const struct sock *sk, int flags)
3206 {
3207 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3208 flags | XFRM_LOOKUP_QUEUE |
3209 XFRM_LOOKUP_KEEP_DST_REF);
3210
3211 if (PTR_ERR(dst) == -EREMOTE)
3212 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3213
3214 if (IS_ERR(dst))
3215 dst_release(dst_orig);
3216
3217 return dst;
3218 }
3219 EXPORT_SYMBOL(xfrm_lookup_route);
3220
3221 static inline int
3222 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3223 {
3224 struct sec_path *sp = skb_sec_path(skb);
3225 struct xfrm_state *x;
3226
3227 if (!sp || idx < 0 || idx >= sp->len)
3228 return 0;
3229 x = sp->xvec[idx];
3230 if (!x->type->reject)
3231 return 0;
3232 return x->type->reject(x, skb, fl);
3233 }
3234
3235 /* When skb is transformed back to its "native" form, we have to
3236 * check policy restrictions. At the moment we do this in a maximally
3237 * stupid way. Shame on me. :-) Of course, connected sockets must
3238 * have the policy cached at them.
3239 */
3240
3241 static inline int
3242 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3243 unsigned short family, u32 if_id)
3244 {
3245 if (xfrm_state_kern(x))
3246 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3247 return x->id.proto == tmpl->id.proto &&
3248 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3249 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3250 x->props.mode == tmpl->mode &&
3251 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3252 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3253 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3254 xfrm_state_addr_cmp(tmpl, x, family)) &&
3255 (if_id == 0 || if_id == x->if_id);
3256 }
3257
3258 /*
3259 * 0 or more than 0 is returned when validation succeeds (either a bypass
3260 * because of optional transport mode, or the next index after the matched
3261 * secpath state for the template).
3262 * -1 is returned when no matching template is found.
3263 * Otherwise "-2 - errored_index" is returned.
3264 */
3265 static inline int
3266 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3267 unsigned short family, u32 if_id)
3268 {
3269 int idx = start;
3270
3271 if (tmpl->optional) {
3272 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3273 return start;
3274 } else
3275 start = -1;
3276 for (; idx < sp->len; idx++) {
3277 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3278 return ++idx;
3279 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3280 if (idx < sp->verified_cnt) {
3281 /* Secpath entry previously verified, consider optional and
3282 * continue searching
3283 */
3284 continue;
3285 }
3286
3287 if (start == -1)
3288 start = -2-idx;
3289 break;
3290 }
3291 }
3292 return start;
3293 }
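/*
 * Worked example for the error encoding above: with start == 0 and a
 * non-optional template that hits an unverified non-transport secpath
 * entry at idx == 1, start becomes -2 - 1 == -3; the caller in
 * __xfrm_policy_check() below recovers the errored index as -(2 + k) == 1.
 */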
3294
3295 static void
3296 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3297 {
3298 const struct iphdr *iph = ip_hdr(skb);
3299 int ihl = iph->ihl;
3300 u8 *xprth = skb_network_header(skb) + ihl * 4;
3301 struct flowi4 *fl4 = &fl->u.ip4;
3302 int oif = 0;
3303
3304 if (skb_dst(skb) && skb_dst(skb)->dev)
3305 oif = skb_dst(skb)->dev->ifindex;
3306
3307 memset(fl4, 0, sizeof(struct flowi4));
3308 fl4->flowi4_mark = skb->mark;
3309 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3310
3311 fl4->flowi4_proto = iph->protocol;
3312 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3313 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3314 fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
3315
3316 if (!ip_is_fragment(iph)) {
3317 switch (iph->protocol) {
3318 case IPPROTO_UDP:
3319 case IPPROTO_UDPLITE:
3320 case IPPROTO_TCP:
3321 case IPPROTO_SCTP:
3322 case IPPROTO_DCCP:
3323 if (xprth + 4 < skb->data ||
3324 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3325 __be16 *ports;
3326
3327 xprth = skb_network_header(skb) + ihl * 4;
3328 ports = (__be16 *)xprth;
3329
3330 fl4->fl4_sport = ports[!!reverse];
3331 fl4->fl4_dport = ports[!reverse];
3332 }
3333 break;
3334 case IPPROTO_ICMP:
3335 if (xprth + 2 < skb->data ||
3336 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3337 u8 *icmp;
3338
3339 xprth = skb_network_header(skb) + ihl * 4;
3340 icmp = xprth;
3341
3342 fl4->fl4_icmp_type = icmp[0];
3343 fl4->fl4_icmp_code = icmp[1];
3344 }
3345 break;
3346 case IPPROTO_ESP:
3347 if (xprth + 4 < skb->data ||
3348 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3349 __be32 *ehdr;
3350
3351 xprth = skb_network_header(skb) + ihl * 4;
3352 ehdr = (__be32 *)xprth;
3353
3354 fl4->fl4_ipsec_spi = ehdr[0];
3355 }
3356 break;
3357 case IPPROTO_AH:
3358 if (xprth + 8 < skb->data ||
3359 pskb_may_pull(skb, xprth + 8 - skb->data)) {
3360 __be32 *ah_hdr;
3361
3362 xprth = skb_network_header(skb) + ihl * 4;
3363 ah_hdr = (__be32 *)xprth;
3364
3365 fl4->fl4_ipsec_spi = ah_hdr[1];
3366 }
3367 break;
3368 case IPPROTO_COMP:
3369 if (xprth + 4 < skb->data ||
3370 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3371 __be16 *ipcomp_hdr;
3372
3373 xprth = skb_network_header(skb) + ihl * 4;
3374 ipcomp_hdr = (__be16 *)xprth;
3375
3376 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3377 }
3378 break;
3379 case IPPROTO_GRE:
3380 if (xprth + 12 < skb->data ||
3381 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3382 __be16 *greflags;
3383 __be32 *gre_hdr;
3384
3385 xprth = skb_network_header(skb) + ihl * 4;
3386 greflags = (__be16 *)xprth;
3387 gre_hdr = (__be32 *)xprth;
3388
3389 if (greflags[0] & GRE_KEY) {
3390 if (greflags[0] & GRE_CSUM)
3391 gre_hdr++;
3392 fl4->fl4_gre_key = gre_hdr[1];
3393 }
3394 }
3395 break;
3396 default:
3397 fl4->fl4_ipsec_spi = 0;
3398 break;
3399 }
3400 }
3401 }
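/*
 * Worked example of the reverse handling above: for a TCP segment
 * 10.0.0.1:1234 -> 10.0.0.2:80, reverse == false yields
 * saddr:sport == 10.0.0.1:1234 and daddr:dport == 10.0.0.2:80, while
 * reverse == true swaps both pairs, describing the flow a reply would use.
 */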
3402
3403 #if IS_ENABLED(CONFIG_IPV6)
3404 static void
3405 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3406 {
3407 struct flowi6 *fl6 = &fl->u.ip6;
3408 int onlyproto = 0;
3409 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3410 u32 offset = sizeof(*hdr);
3411 struct ipv6_opt_hdr *exthdr;
3412 const unsigned char *nh = skb_network_header(skb);
3413 u16 nhoff = IP6CB(skb)->nhoff;
3414 int oif = 0;
3415 u8 nexthdr;
3416
3417 if (!nhoff)
3418 nhoff = offsetof(struct ipv6hdr, nexthdr);
3419
3420 nexthdr = nh[nhoff];
3421
3422 if (skb_dst(skb) && skb_dst(skb)->dev)
3423 oif = skb_dst(skb)->dev->ifindex;
3424
3425 memset(fl6, 0, sizeof(struct flowi6));
3426 fl6->flowi6_mark = skb->mark;
3427 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3428
3429 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3430 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3431
3432 while (nh + offset + sizeof(*exthdr) < skb->data ||
3433 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3434 nh = skb_network_header(skb);
3435 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3436
3437 switch (nexthdr) {
3438 case NEXTHDR_FRAGMENT:
3439 onlyproto = 1;
3440 fallthrough;
3441 case NEXTHDR_ROUTING:
3442 case NEXTHDR_HOP:
3443 case NEXTHDR_DEST:
3444 offset += ipv6_optlen(exthdr);
3445 nexthdr = exthdr->nexthdr;
3446 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3447 break;
3448 case IPPROTO_UDP:
3449 case IPPROTO_UDPLITE:
3450 case IPPROTO_TCP:
3451 case IPPROTO_SCTP:
3452 case IPPROTO_DCCP:
3453 if (!onlyproto && (nh + offset + 4 < skb->data ||
3454 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3455 __be16 *ports;
3456
3457 nh = skb_network_header(skb);
3458 ports = (__be16 *)(nh + offset);
3459 fl6->fl6_sport = ports[!!reverse];
3460 fl6->fl6_dport = ports[!reverse];
3461 }
3462 fl6->flowi6_proto = nexthdr;
3463 return;
3464 case IPPROTO_ICMPV6:
3465 if (!onlyproto && (nh + offset + 2 < skb->data ||
3466 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3467 u8 *icmp;
3468
3469 nh = skb_network_header(skb);
3470 icmp = (u8 *)(nh + offset);
3471 fl6->fl6_icmp_type = icmp[0];
3472 fl6->fl6_icmp_code = icmp[1];
3473 }
3474 fl6->flowi6_proto = nexthdr;
3475 return;
3476 case IPPROTO_GRE:
3477 if (!onlyproto &&
3478 (nh + offset + 12 < skb->data ||
3479 pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
3480 struct gre_base_hdr *gre_hdr;
3481 __be32 *gre_key;
3482
3483 nh = skb_network_header(skb);
3484 gre_hdr = (struct gre_base_hdr *)(nh + offset);
3485 gre_key = (__be32 *)(gre_hdr + 1);
3486
3487 if (gre_hdr->flags & GRE_KEY) {
3488 if (gre_hdr->flags & GRE_CSUM)
3489 gre_key++;
3490 fl6->fl6_gre_key = *gre_key;
3491 }
3492 }
3493 fl6->flowi6_proto = nexthdr;
3494 return;
3495
3496 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3497 case IPPROTO_MH:
3498 offset += ipv6_optlen(exthdr);
3499 if (!onlyproto && (nh + offset + 3 < skb->data ||
3500 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3501 struct ip6_mh *mh;
3502
3503 nh = skb_network_header(skb);
3504 mh = (struct ip6_mh *)(nh + offset);
3505 fl6->fl6_mh_type = mh->ip6mh_type;
3506 }
3507 fl6->flowi6_proto = nexthdr;
3508 return;
3509 #endif
3510 /* XXX Why are these headers here? */
3511 case IPPROTO_AH:
3512 case IPPROTO_ESP:
3513 case IPPROTO_COMP:
3514 default:
3515 fl6->fl6_ipsec_spi = 0;
3516 fl6->flowi6_proto = nexthdr;
3517 return;
3518 }
3519 }
3520 }
3521 #endif
3522
3523 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3524 unsigned int family, int reverse)
3525 {
3526 switch (family) {
3527 case AF_INET:
3528 decode_session4(skb, fl, reverse);
3529 break;
3530 #if IS_ENABLED(CONFIG_IPV6)
3531 case AF_INET6:
3532 decode_session6(skb, fl, reverse);
3533 break;
3534 #endif
3535 default:
3536 return -EAFNOSUPPORT;
3537 }
3538
3539 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3540 }
3541 EXPORT_SYMBOL(__xfrm_decode_session);
3542
3543 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3544 {
3545 for (; k < sp->len; k++) {
3546 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3547 *idxp = k;
3548 return 1;
3549 }
3550 }
3551
3552 return 0;
3553 }
3554
3555 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3556 unsigned short family)
3557 {
3558 struct net *net = dev_net(skb->dev);
3559 struct xfrm_policy *pol;
3560 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3561 int npols = 0;
3562 int xfrm_nr;
3563 int pi;
3564 int reverse;
3565 struct flowi fl;
3566 int xerr_idx = -1;
3567 const struct xfrm_if_cb *ifcb;
3568 struct sec_path *sp;
3569 struct xfrm_if *xi;
3570 u32 if_id = 0;
3571
3572 rcu_read_lock();
3573 ifcb = xfrm_if_get_cb();
3574
3575 if (ifcb) {
3576 xi = ifcb->decode_session(skb, family);
3577 if (xi) {
3578 if_id = xi->p.if_id;
3579 net = xi->net;
3580 }
3581 }
3582 rcu_read_unlock();
3583
3584 reverse = dir & ~XFRM_POLICY_MASK;
3585 dir &= XFRM_POLICY_MASK;
3586
3587 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3588 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3589 return 0;
3590 }
3591
3592 nf_nat_decode_session(skb, &fl, family);
3593
3594 /* First, check the used SAs against their selectors. */
3595 sp = skb_sec_path(skb);
3596 if (sp) {
3597 int i;
3598
3599 for (i = sp->len - 1; i >= 0; i--) {
3600 struct xfrm_state *x = sp->xvec[i];
3601 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3602 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3603 return 0;
3604 }
3605 }
3606 }
3607
3608 pol = NULL;
3609 sk = sk_to_full_sk(sk);
3610 if (sk && sk->sk_policy[dir]) {
3611 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3612 if (IS_ERR(pol)) {
3613 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3614 return 0;
3615 }
3616 }
3617
3618 if (!pol)
3619 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3620
3621 if (IS_ERR(pol)) {
3622 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3623 return 0;
3624 }
3625
3626 if (!pol) {
3627 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3628 xfrm_secpath_reject(xerr_idx, skb, &fl);
3629 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3630 return 0;
3631 }
3632 return 1;
3633 }
3634
3635 pol->curlft.use_time = ktime_get_real_seconds();
3636
3637 pols[0] = pol;
3638 npols++;
3639 #ifdef CONFIG_XFRM_SUB_POLICY
3640 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3641 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3642 &fl, family,
3643 XFRM_POLICY_IN, if_id);
3644 if (pols[1]) {
3645 if (IS_ERR(pols[1])) {
3646 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3647 xfrm_pol_put(pols[0]);
3648 return 0;
3649 }
3650 pols[1]->curlft.use_time = ktime_get_real_seconds();
3651 npols++;
3652 }
3653 }
3654 #endif
3655
3656 if (pol->action == XFRM_POLICY_ALLOW) {
3657 static struct sec_path dummy;
3658 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3659 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3660 struct xfrm_tmpl **tpp = tp;
3661 int ti = 0;
3662 int i, k;
3663
3664 sp = skb_sec_path(skb);
3665 if (!sp)
3666 sp = &dummy;
3667
3668 for (pi = 0; pi < npols; pi++) {
3669 if (pols[pi] != pol &&
3670 pols[pi]->action != XFRM_POLICY_ALLOW) {
3671 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3672 goto reject;
3673 }
3674 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3675 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3676 goto reject_error;
3677 }
3678 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3679 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3680 }
3681 xfrm_nr = ti;
3682
3683 if (npols > 1) {
3684 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3685 tpp = stp;
3686 }
3687
3688 /* For each tunnel xfrm, find the first matching tmpl.
3689 * For each tmpl before that, find corresponding xfrm.
3690 * Order is _important_. Later we will implement
3691 * some barriers, but at the moment barriers
3692 * are implied between each two transformations.
3693 * Upon success, marks secpath entries as having been
3694 * verified to allow them to be skipped in future policy
3695 * checks (e.g. nested tunnels).
3696 */
3697 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3698 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3699 if (k < 0) {
3700 if (k < -1)
3701 /* "-2 - errored_index" returned */
3702 xerr_idx = -(2+k);
3703 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3704 goto reject;
3705 }
3706 }
3707
3708 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3709 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3710 goto reject;
3711 }
3712
3713 xfrm_pols_put(pols, npols);
3714 sp->verified_cnt = k;
3715
3716 return 1;
3717 }
3718 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3719
3720 reject:
3721 xfrm_secpath_reject(xerr_idx, skb, &fl);
3722 reject_error:
3723 xfrm_pols_put(pols, npols);
3724 return 0;
3725 }
3726 EXPORT_SYMBOL(__xfrm_policy_check);
3727
3728 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3729 {
3730 struct net *net = dev_net(skb->dev);
3731 struct flowi fl;
3732 struct dst_entry *dst;
3733 int res = 1;
3734
3735 if (xfrm_decode_session(skb, &fl, family) < 0) {
3736 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3737 return 0;
3738 }
3739
3740 skb_dst_force(skb);
3741 if (!skb_dst(skb)) {
3742 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3743 return 0;
3744 }
3745
3746 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3747 if (IS_ERR(dst)) {
3748 res = 0;
3749 dst = NULL;
3750 }
3751 skb_dst_set(skb, dst);
3752 return res;
3753 }
3754 EXPORT_SYMBOL(__xfrm_route_forward);
3755
3756 /* Optimize later using cookies and generation ids. */
3757
3758 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3759 {
3760 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3761 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3762 * get validated by dst_ops->check on every use. We do this
3763 * because when a normal route referenced by an XFRM dst is
3764 * obsoleted we do not go looking around for all parent
3765 * referencing XFRM dsts so that we can invalidate them. It
3766 * is just too much work. Instead we make the checks here on
3767 * every use. For example:
3768 *
3769 * XFRM dst A --> IPv4 dst X
3770 *
3771 * X is the "xdst->route" of A (X is also the "dst->path" of A
3772 * in this example). If X is marked obsolete, "A" will not
3773 * notice. That's what we are validating here via the
3774 * stale_bundle() check.
3775 *
3776 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3777 * be marked on it.
3778 * This will force stale_bundle() to fail on any xdst bundle with
3779 * this dst linked in it.
3780 */
3781 if (dst->obsolete < 0 && !stale_bundle(dst))
3782 return dst;
3783
3784 return NULL;
3785 }
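/*
 * For reference (values from include/net/dst.h at the time of writing):
 * DST_OBSOLETE_FORCE_CHK == -1 and DST_OBSOLETE_KILL == -2, so the
 * "dst->obsolete < 0" test above selects exactly the entries that request
 * per-use validation, while DST_OBSOLETE_DEAD (2) makes the dst_check()
 * calls inside xfrm_bundle_ok() fail.
 */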
3786
3787 static int stale_bundle(struct dst_entry *dst)
3788 {
3789 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3790 }
3791
3792 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3793 {
3794 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3795 dst->dev = dev_net(dev)->loopback_dev;
3796 dev_hold(dst->dev);
3797 dev_put(dev);
3798 }
3799 }
3800 EXPORT_SYMBOL(xfrm_dst_ifdown);
3801
3802 static void xfrm_link_failure(struct sk_buff *skb)
3803 {
3804 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3805 }
3806
3807 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3808 {
3809 if (dst) {
3810 if (dst->obsolete) {
3811 dst_release(dst);
3812 dst = NULL;
3813 }
3814 }
3815 return dst;
3816 }
3817
3818 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3819 {
3820 while (nr--) {
3821 struct xfrm_dst *xdst = bundle[nr];
3822 u32 pmtu, route_mtu_cached;
3823 struct dst_entry *dst;
3824
3825 dst = &xdst->u.dst;
3826 pmtu = dst_mtu(xfrm_dst_child(dst));
3827 xdst->child_mtu_cached = pmtu;
3828
3829 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3830
3831 route_mtu_cached = dst_mtu(xdst->route);
3832 xdst->route_mtu_cached = route_mtu_cached;
3833
3834 if (pmtu > route_mtu_cached)
3835 pmtu = route_mtu_cached;
3836
3837 dst_metric_set(dst, RTAX_MTU, pmtu);
3838 }
3839 }
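/*
 * Worked example for the clamp above (illustrative numbers): with a child
 * mtu of 1500 and state overhead that makes xfrm_state_mtu() return 1438,
 * a cached route mtu of 1400 caps the entry at min(1438, 1400) == 1400.
 */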
3840
3841 /* Check that the bundle accepts the flow and its components are
3842 * still valid.
3843 */
3844
3845 static int xfrm_bundle_ok(struct xfrm_dst *first)
3846 {
3847 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3848 struct dst_entry *dst = &first->u.dst;
3849 struct xfrm_dst *xdst;
3850 int start_from, nr;
3851 u32 mtu;
3852
3853 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3854 (dst->dev && !netif_running(dst->dev)))
3855 return 0;
3856
3857 if (dst->flags & DST_XFRM_QUEUE)
3858 return 1;
3859
3860 start_from = nr = 0;
3861 do {
3862 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3863
3864 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3865 return 0;
3866 if (xdst->xfrm_genid != dst->xfrm->genid)
3867 return 0;
3868 if (xdst->num_pols > 0 &&
3869 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3870 return 0;
3871
3872 bundle[nr++] = xdst;
3873
3874 mtu = dst_mtu(xfrm_dst_child(dst));
3875 if (xdst->child_mtu_cached != mtu) {
3876 start_from = nr;
3877 xdst->child_mtu_cached = mtu;
3878 }
3879
3880 if (!dst_check(xdst->route, xdst->route_cookie))
3881 return 0;
3882 mtu = dst_mtu(xdst->route);
3883 if (xdst->route_mtu_cached != mtu) {
3884 start_from = nr;
3885 xdst->route_mtu_cached = mtu;
3886 }
3887
3888 dst = xfrm_dst_child(dst);
3889 } while (dst->xfrm);
3890
3891 if (likely(!start_from))
3892 return 1;
3893
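/* Second pass: an MTU changed somewhere, so re-propagate MTUs from the
 * deepest stale entry back towards the top of the bundle, applying each
 * state's overhead on the way up.
 */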
3894 xdst = bundle[start_from - 1];
3895 mtu = xdst->child_mtu_cached;
3896 while (start_from--) {
3897 dst = &xdst->u.dst;
3898
3899 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3900 if (mtu > xdst->route_mtu_cached)
3901 mtu = xdst->route_mtu_cached;
3902 dst_metric_set(dst, RTAX_MTU, mtu);
3903 if (!start_from)
3904 break;
3905
3906 xdst = bundle[start_from - 1];
3907 xdst->child_mtu_cached = mtu;
3908 }
3909
3910 return 1;
3911 }
3912
3913 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3914 {
3915 return dst_metric_advmss(xfrm_dst_path(dst));
3916 }
3917
3918 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3919 {
3920 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3921
3922 return mtu ? : dst_mtu(xfrm_dst_path(dst));
3923 }
3924
3925 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3926 const void *daddr)
3927 {
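/* Find the address that neighbour resolution should target: transport
 * mode does not change the outer destination, while tunnel-style modes
 * switch to the state's destination (or care-of) address.
 */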
3928 while (dst->xfrm) {
3929 const struct xfrm_state *xfrm = dst->xfrm;
3930
3931 dst = xfrm_dst_child(dst);
3932
3933 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3934 continue;
3935 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3936 daddr = xfrm->coaddr;
3937 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3938 daddr = &xfrm->id.daddr;
3939 }
3940 return daddr;
3941 }
3942
3943 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3944 struct sk_buff *skb,
3945 const void *daddr)
3946 {
3947 const struct dst_entry *path = xfrm_dst_path(dst);
3948
3949 if (!skb)
3950 daddr = xfrm_get_dst_nexthop(dst, daddr);
3951 return path->ops->neigh_lookup(path, skb, daddr);
3952 }
3953
3954 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3955 {
3956 const struct dst_entry *path = xfrm_dst_path(dst);
3957
3958 daddr = xfrm_get_dst_nexthop(dst, daddr);
3959 path->ops->confirm_neigh(path, daddr);
3960 }
3961
3962 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3963 {
3964 int err = 0;
3965
3966 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3967 return -EAFNOSUPPORT;
3968
3969 spin_lock(&xfrm_policy_afinfo_lock);
3970 if (unlikely(xfrm_policy_afinfo[family] != NULL))
3971 err = -EEXIST;
3972 else {
3973 struct dst_ops *dst_ops = afinfo->dst_ops;
3974 if (likely(dst_ops->kmem_cachep == NULL))
3975 dst_ops->kmem_cachep = xfrm_dst_cache;
3976 if (likely(dst_ops->check == NULL))
3977 dst_ops->check = xfrm_dst_check;
3978 if (likely(dst_ops->default_advmss == NULL))
3979 dst_ops->default_advmss = xfrm_default_advmss;
3980 if (likely(dst_ops->mtu == NULL))
3981 dst_ops->mtu = xfrm_mtu;
3982 if (likely(dst_ops->negative_advice == NULL))
3983 dst_ops->negative_advice = xfrm_negative_advice;
3984 if (likely(dst_ops->link_failure == NULL))
3985 dst_ops->link_failure = xfrm_link_failure;
3986 if (likely(dst_ops->neigh_lookup == NULL))
3987 dst_ops->neigh_lookup = xfrm_neigh_lookup;
3988 if (likely(!dst_ops->confirm_neigh))
3989 dst_ops->confirm_neigh = xfrm_confirm_neigh;
3990 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3991 }
3992 spin_unlock(&xfrm_policy_afinfo_lock);
3993
3994 return err;
3995 }
3996 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3997
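/* Usage sketch (illustrative, not from this file): an address family
 * fills in its dst_ops and registers once at init time; any dst_ops
 * hooks it leaves NULL are given the generic xfrm defaults above.
 * Loosely modeled on the IPv4 registration in xfrm4_policy.c; the
 * "example_" names below are hypothetical.
 *
 *	static const struct xfrm_policy_afinfo example_afinfo = {
 *		.dst_ops	= &example_dst_ops,
 *		.dst_lookup	= example_dst_lookup,
 *		.fill_dst	= example_fill_dst,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xfrm_policy_register_afinfo(&example_afinfo, AF_INET);
 *	}
 */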
3998 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3999 {
4000 struct dst_ops *dst_ops = afinfo->dst_ops;
4001 int i;
4002
4003 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4004 if (xfrm_policy_afinfo[i] != afinfo)
4005 continue;
4006 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4007 break;
4008 }
4009
4010 synchronize_rcu();
4011
4012 dst_ops->kmem_cachep = NULL;
4013 dst_ops->check = NULL;
4014 dst_ops->negative_advice = NULL;
4015 dst_ops->link_failure = NULL;
4016 }
4017 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4018
4019 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4020 {
4021 spin_lock(&xfrm_if_cb_lock);
4022 rcu_assign_pointer(xfrm_if_cb, ifcb);
4023 spin_unlock(&xfrm_if_cb_lock);
4024 }
4025 EXPORT_SYMBOL(xfrm_if_register_cb);
4026
4027 void xfrm_if_unregister_cb(void)
4028 {
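/* Readers dereference xfrm_if_cb under rcu_read_lock(); clear the
 * pointer first, then wait for all in-flight readers to finish so the
 * old callbacks can no longer be observed once this returns.
 */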
4029 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4030 synchronize_rcu();
4031 }
4032 EXPORT_SYMBOL(xfrm_if_unregister_cb);
4033
4034 #ifdef CONFIG_XFRM_STATISTICS
4035 static int __net_init xfrm_statistics_init(struct net *net)
4036 {
4037 int rv;
4038 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4039 if (!net->mib.xfrm_statistics)
4040 return -ENOMEM;
4041 rv = xfrm_proc_init(net);
4042 if (rv < 0)
4043 free_percpu(net->mib.xfrm_statistics);
4044 return rv;
4045 }
4046
4047 static void xfrm_statistics_fini(struct net *net)
4048 {
4049 xfrm_proc_fini(net);
4050 free_percpu(net->mib.xfrm_statistics);
4051 }
4052 #else
4053 static int __net_init xfrm_statistics_init(struct net *net)
4054 {
4055 return 0;
4056 }
4057
4058 static void xfrm_statistics_fini(struct net *net)
4059 {
4060 }
4061 #endif
4062
4063 static int __net_init xfrm_policy_init(struct net *net)
4064 {
4065 unsigned int hmask, sz;
4066 int dir, err;
4067
4068 if (net_eq(net, &init_net)) {
4069 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4070 sizeof(struct xfrm_dst),
4071 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4072 NULL);
4073 err = rhashtable_init(&xfrm_policy_inexact_table,
4074 &xfrm_pol_inexact_params);
4075 BUG_ON(err);
4076 }
4077
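/* Start each hash with just eight buckets; xfrm_hash_resize() grows the
 * bydst tables on demand from the policy_hash_work worker.
 */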
4078 hmask = 8 - 1;
4079 sz = (hmask+1) * sizeof(struct hlist_head);
4080
4081 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4082 if (!net->xfrm.policy_byidx)
4083 goto out_byidx;
4084 net->xfrm.policy_idx_hmask = hmask;
4085
4086 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4087 struct xfrm_policy_hash *htab;
4088
4089 net->xfrm.policy_count[dir] = 0;
4090 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4091 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4092
4093 htab = &net->xfrm.policy_bydst[dir];
4094 htab->table = xfrm_hash_alloc(sz);
4095 if (!htab->table)
4096 goto out_bydst;
4097 htab->hmask = hmask;
4098 htab->dbits4 = 32;
4099 htab->sbits4 = 32;
4100 htab->dbits6 = 128;
4101 htab->sbits6 = 128;
4102 }
4103 net->xfrm.policy_hthresh.lbits4 = 32;
4104 net->xfrm.policy_hthresh.rbits4 = 32;
4105 net->xfrm.policy_hthresh.lbits6 = 128;
4106 net->xfrm.policy_hthresh.rbits6 = 128;
4107
4108 seqlock_init(&net->xfrm.policy_hthresh.lock);
4109
4110 INIT_LIST_HEAD(&net->xfrm.policy_all);
4111 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4112 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4113 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4114 return 0;
4115
4116 out_bydst:
4117 for (dir--; dir >= 0; dir--) {
4118 struct xfrm_policy_hash *htab;
4119
4120 htab = &net->xfrm.policy_bydst[dir];
4121 xfrm_hash_free(htab->table, sz);
4122 }
4123 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4124 out_byidx:
4125 return -ENOMEM;
4126 }
4127
4128 static void xfrm_policy_fini(struct net *net)
4129 {
4130 struct xfrm_pol_inexact_bin *b, *t;
4131 unsigned int sz;
4132 int dir;
4133
4134 flush_work(&net->xfrm.policy_hash_work);
4135 #ifdef CONFIG_XFRM_SUB_POLICY
4136 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4137 #endif
4138 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4139
4140 WARN_ON(!list_empty(&net->xfrm.policy_all));
4141
4142 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4143 struct xfrm_policy_hash *htab;
4144
4145 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4146
4147 htab = &net->xfrm.policy_bydst[dir];
4148 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4149 WARN_ON(!hlist_empty(htab->table));
4150 xfrm_hash_free(htab->table, sz);
4151 }
4152
4153 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4154 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4155 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4156
4157 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4158 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4159 __xfrm_policy_inexact_prune_bin(b, true);
4160 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4161 }
4162
4163 static int __net_init xfrm_net_init(struct net *net)
4164 {
4165 int rv;
4166
4167 /* Initialize the per-net locks here */
4168 spin_lock_init(&net->xfrm.xfrm_state_lock);
4169 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4170 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4171
4172 rv = xfrm_statistics_init(net);
4173 if (rv < 0)
4174 goto out_statistics;
4175 rv = xfrm_state_init(net);
4176 if (rv < 0)
4177 goto out_state;
4178 rv = xfrm_policy_init(net);
4179 if (rv < 0)
4180 goto out_policy;
4181 rv = xfrm_sysctl_init(net);
4182 if (rv < 0)
4183 goto out_sysctl;
4184
4185 return 0;
4186
4187 out_sysctl:
4188 xfrm_policy_fini(net);
4189 out_policy:
4190 xfrm_state_fini(net);
4191 out_state:
4192 xfrm_statistics_fini(net);
4193 out_statistics:
4194 return rv;
4195 }
4196
4197 static void __net_exit xfrm_net_exit(struct net *net)
4198 {
4199 xfrm_sysctl_fini(net);
4200 xfrm_policy_fini(net);
4201 xfrm_state_fini(net);
4202 xfrm_statistics_fini(net);
4203 }
4204
4205 static struct pernet_operations __net_initdata xfrm_net_ops = {
4206 .init = xfrm_net_init,
4207 .exit = xfrm_net_exit,
4208 };
4209
4210 void __init xfrm_init(void)
4211 {
4212 register_pernet_subsys(&xfrm_net_ops);
4213 xfrm_dev_init();
4214 seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
4215 xfrm_input_init();
4216
4217 #ifdef CONFIG_XFRM_ESPINTCP
4218 espintcp_init();
4219 #endif
4220
4221 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4222 synchronize_rcu();
4223 }
4224
4225 #ifdef CONFIG_AUDITSYSCALL
4226 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4227 struct audit_buffer *audit_buf)
4228 {
4229 struct xfrm_sec_ctx *ctx = xp->security;
4230 struct xfrm_selector *sel = &xp->selector;
4231
4232 if (ctx)
4233 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4234 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4235
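/* A full-length prefix denotes a host address, so the prefix length is
 * logged only when the selector covers a subnet.
 */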
4236 switch (sel->family) {
4237 case AF_INET:
4238 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4239 if (sel->prefixlen_s != 32)
4240 audit_log_format(audit_buf, " src_prefixlen=%d",
4241 sel->prefixlen_s);
4242 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4243 if (sel->prefixlen_d != 32)
4244 audit_log_format(audit_buf, " dst_prefixlen=%d",
4245 sel->prefixlen_d);
4246 break;
4247 case AF_INET6:
4248 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4249 if (sel->prefixlen_s != 128)
4250 audit_log_format(audit_buf, " src_prefixlen=%d",
4251 sel->prefixlen_s);
4252 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4253 if (sel->prefixlen_d != 128)
4254 audit_log_format(audit_buf, " dst_prefixlen=%d",
4255 sel->prefixlen_d);
4256 break;
4257 }
4258 }
4259
4260 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4261 {
4262 struct audit_buffer *audit_buf;
4263
4264 audit_buf = xfrm_audit_start("SPD-add");
4265 if (audit_buf == NULL)
4266 return;
4267 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4268 audit_log_format(audit_buf, " res=%u", result);
4269 xfrm_audit_common_policyinfo(xp, audit_buf);
4270 audit_log_end(audit_buf);
4271 }
4272 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4273
4274 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4275 bool task_valid)
4276 {
4277 struct audit_buffer *audit_buf;
4278
4279 audit_buf = xfrm_audit_start("SPD-delete");
4280 if (audit_buf == NULL)
4281 return;
4282 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4283 audit_log_format(audit_buf, " res=%u", result);
4284 xfrm_audit_common_policyinfo(xp, audit_buf);
4285 audit_log_end(audit_buf);
4286 }
4287 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4288 #endif
4289
4290 #ifdef CONFIG_XFRM_MIGRATE
4291 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4292 const struct xfrm_selector *sel_tgt)
4293 {
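/* A wildcard-protocol selector matches on family, addresses and prefix
 * lengths alone; any other selector must match exactly.
 */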
4294 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4295 if (sel_tgt->family == sel_cmp->family &&
4296 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4297 sel_cmp->family) &&
4298 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4299 sel_cmp->family) &&
4300 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4301 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4302 return true;
4303 }
4304 } else {
4305 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4306 return true;
4307 }
4308 }
4309 return false;
4310 }
4311
4312 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4313 u8 dir, u8 type, struct net *net, u32 if_id)
4314 {
4315 struct xfrm_policy *pol, *ret = NULL;
4316 struct hlist_head *chain;
4317 u32 priority = ~0U;
4318
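/* Prefer an exact bydst hash match; then scan the inexact list, but only
 * accept an inexact policy whose priority beats the exact match found
 * so far.
 */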
4319 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4320 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4321 hlist_for_each_entry(pol, chain, bydst) {
4322 if ((if_id == 0 || pol->if_id == if_id) &&
4323 xfrm_migrate_selector_match(sel, &pol->selector) &&
4324 pol->type == type) {
4325 ret = pol;
4326 priority = ret->priority;
4327 break;
4328 }
4329 }
4330 chain = &net->xfrm.policy_inexact[dir];
4331 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4332 if ((pol->priority >= priority) && ret)
4333 break;
4334
4335 if ((if_id == 0 || pol->if_id == if_id) &&
4336 xfrm_migrate_selector_match(sel, &pol->selector) &&
4337 pol->type == type) {
4338 ret = pol;
4339 break;
4340 }
4341 }
4342
4343 xfrm_pol_hold(ret);
4344
4345 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4346
4347 return ret;
4348 }
4349
4350 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4351 {
4352 int match = 0;
4353
4354 if (t->mode == m->mode && t->id.proto == m->proto &&
4355 (m->reqid == 0 || t->reqid == m->reqid)) {
4356 switch (t->mode) {
4357 case XFRM_MODE_TUNNEL:
4358 case XFRM_MODE_BEET:
4359 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4360 m->old_family) &&
4361 xfrm_addr_equal(&t->saddr, &m->old_saddr,
4362 m->old_family)) {
4363 match = 1;
4364 }
4365 break;
4366 case XFRM_MODE_TRANSPORT:
4367 /* In transport mode the template does not store
4368  * any IP addresses, so comparing mode and
4369  * protocol is sufficient. */
4370 match = 1;
4371 break;
4372 default:
4373 break;
4374 }
4375 }
4376 return match;
4377 }
4378
4379 /* update endpoint address(es) of template(s) */
4380 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4381 struct xfrm_migrate *m, int num_migrate)
4382 {
4383 struct xfrm_migrate *mp;
4384 int i, j, n = 0;
4385
4386 write_lock_bh(&pol->lock);
4387 if (unlikely(pol->walk.dead)) {
4388 /* target policy has been deleted */
4389 write_unlock_bh(&pol->lock);
4390 return -ENOENT;
4391 }
4392
4393 for (i = 0; i < pol->xfrm_nr; i++) {
4394 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4395 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4396 continue;
4397 n++;
4398 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4399 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4400 continue;
4401 /* update endpoints */
4402 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4403 sizeof(pol->xfrm_vec[i].id.daddr));
4404 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4405 sizeof(pol->xfrm_vec[i].saddr));
4406 pol->xfrm_vec[i].encap_family = mp->new_family;
4407 /* flush bundles */
4408 atomic_inc(&pol->genid);
4409 }
4410 }
4411
4412 write_unlock_bh(&pol->lock);
4413
4414 if (!n)
4415 return -ENODATA;
4416
4417 return 0;
4418 }
4419
4420 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4421 {
4422 int i, j;
4423
4424 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4425 return -EINVAL;
4426
4427 for (i = 0; i < num_migrate; i++) {
4428 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4429 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4430 return -EINVAL;
4431
4432 /* reject duplicated migration entries */
4433 for (j = i + 1; j < num_migrate; j++) {
4434 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4435 sizeof(m[i].old_daddr)) &&
4436 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4437 sizeof(m[i].old_saddr)) &&
4438 m[i].proto == m[j].proto &&
4439 m[i].mode == m[j].mode &&
4440 m[i].reqid == m[j].reqid &&
4441 m[i].old_family == m[j].old_family)
4442 return -EINVAL;
4443 }
4444 }
4445
4446 return 0;
4447 }
4448
4449 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4450 struct xfrm_migrate *m, int num_migrate,
4451 struct xfrm_kmaddress *k, struct net *net,
4452 struct xfrm_encap_tmpl *encap, u32 if_id)
4453 {
4454 int i, err, nx_cur = 0, nx_new = 0;
4455 struct xfrm_policy *pol = NULL;
4456 struct xfrm_state *x, *xc;
4457 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4458 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4459 struct xfrm_migrate *mp;
4460
4461 /* Stage 0 - sanity checks */
4462 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4463 goto out;
4464
4465 if (dir >= XFRM_POLICY_MAX) {
4466 err = -EINVAL;
4467 goto out;
4468 }
4469
4470 /* Stage 1 - find policy */
4471 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
4472 err = -ENOENT;
4473 goto out;
4474 }
4475
4476 /* Stage 2 - find and update state(s) */
4477 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4478 if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4479 x_cur[nx_cur] = x;
4480 nx_cur++;
4481 xc = xfrm_state_migrate(x, mp, encap);
4482 if (xc) {
4483 x_new[nx_new] = xc;
4484 nx_new++;
4485 } else {
4486 err = -ENODATA;
4487 goto restore_state;
4488 }
4489 }
4490 }
4491
4492 /* Stage 3 - update policy */
4493 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4494 goto restore_state;
4495
4496 /* Stage 4 - delete old state(s) */
4497 if (nx_cur) {
4498 xfrm_states_put(x_cur, nx_cur);
4499 xfrm_states_delete(x_cur, nx_cur);
4500 }
4501
4502 /* Stage 5 - announce */
4503 km_migrate(sel, dir, type, m, num_migrate, k, encap);
4504
4505 xfrm_pol_put(pol);
4506
4507 return 0;
4508 out:
4509 return err;
4510
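/* Unwind: drop the references taken on the current states and delete
 * any freshly cloned states; the original SAs stay installed.
 */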
4511 restore_state:
4512 if (pol)
4513 xfrm_pol_put(pol);
4514 if (nx_cur)
4515 xfrm_states_put(x_cur, nx_cur);
4516 if (nx_new)
4517 xfrm_states_delete(x_new, nx_new);
4518
4519 return err;
4520 }
4521 EXPORT_SYMBOL(xfrm_migrate);
4522 #endif
4523