1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * xfrm_policy.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * Kazunori MIYAZAWA @USAGI
11 * YOSHIFUJI Hideaki
12 * Split up af-specific portion
13 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
14 *
15 */
16
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <linux/icmp.h>
33 #include <net/dst.h>
34 #include <net/flow.h>
35 #include <net/inet_ecn.h>
36 #include <net/xfrm.h>
37 #include <net/ip.h>
38 #include <net/gre.h>
39 #if IS_ENABLED(CONFIG_IPV6_MIP6)
40 #include <net/mip6.h>
41 #endif
42 #ifdef CONFIG_XFRM_STATISTICS
43 #include <net/snmp.h>
44 #endif
45 #ifdef CONFIG_XFRM_ESPINTCP
46 #include <net/espintcp.h>
47 #endif
48 #include <net/inet_dscp.h>
49
50 #include "xfrm_hash.h"
51
52 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
53 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
54 #define XFRM_MAX_QUEUE_LEN 100
55
56 struct xfrm_flo {
57 struct dst_entry *dst_orig;
58 u8 flags;
59 };
60
61 /* prefixes smaller than this are stored in lists, not trees. */
62 #define INEXACT_PREFIXLEN_IPV4 16
63 #define INEXACT_PREFIXLEN_IPV6 48
64
65 struct xfrm_pol_inexact_node {
66 struct rb_node node;
67 union {
68 xfrm_address_t addr;
69 struct rcu_head rcu;
70 };
71 u8 prefixlen;
72
73 struct rb_root root;
74
75 /* the policies matching this node, can be empty list */
76 struct hlist_head hhead;
77 };
78
79 /* xfrm inexact policy search tree:
80 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
81 * |
82 * +---- root_d: sorted by daddr:prefix
83 * | |
84 * | xfrm_pol_inexact_node
85 * | |
86 * | +- root: sorted by saddr/prefix
87 * | | |
88 * | | xfrm_pol_inexact_node
89 * | | |
90 * | | + root: unused
91 * | | |
92 * | | + hhead: saddr:daddr policies
93 * | |
94 * | +- coarse policies and all any:daddr policies
95 * |
96 * +---- root_s: sorted by saddr:prefix
97 * | |
98 * | xfrm_pol_inexact_node
99 * | |
100 * | + root: unused
101 * | |
102 * | + hhead: saddr:any policies
103 * |
104 * +---- coarse policies and all any:any policies
105 *
106 * Lookups return four candidate lists:
107 * 1. any:any list from top-level xfrm_pol_inexact_bin
108 * 2. any:daddr list from daddr tree
109 * 3. saddr:daddr list from 2nd level daddr tree
110 * 4. saddr:any list from saddr tree
111 *
112 * This result set then needs to be searched for the policy with
113 * the lowest priority. If two candidates have the same priority, the
114 * struct xfrm_policy pos member with the lower number is used.
115 *
116 * This replicates previous single-list-search algorithm which would
117 * return first matching policy in the (ordered-by-priority) list.
118 */
119
120 struct xfrm_pol_inexact_key {
121 possible_net_t net;
122 u32 if_id;
123 u16 family;
124 u8 dir, type;
125 };
126
127 struct xfrm_pol_inexact_bin {
128 struct xfrm_pol_inexact_key k;
129 struct rhash_head head;
130 /* list containing '*:*' policies */
131 struct hlist_head hhead;
132
133 seqcount_spinlock_t count;
134 /* tree sorted by daddr/prefix */
135 struct rb_root root_d;
136
137 /* tree sorted by saddr/prefix */
138 struct rb_root root_s;
139
140 /* slow path below */
141 struct list_head inexact_bins;
142 struct rcu_head rcu;
143 };
144
145 enum xfrm_pol_inexact_candidate_type {
146 XFRM_POL_CAND_BOTH,
147 XFRM_POL_CAND_SADDR,
148 XFRM_POL_CAND_DADDR,
149 XFRM_POL_CAND_ANY,
150
151 XFRM_POL_CAND_MAX,
152 };
153
154 struct xfrm_pol_inexact_candidates {
155 struct hlist_head *res[XFRM_POL_CAND_MAX];
156 };
157
158 struct xfrm_flow_keys {
159 struct flow_dissector_key_basic basic;
160 struct flow_dissector_key_control control;
161 union {
162 struct flow_dissector_key_ipv4_addrs ipv4;
163 struct flow_dissector_key_ipv6_addrs ipv6;
164 } addrs;
165 struct flow_dissector_key_ip ip;
166 struct flow_dissector_key_icmp icmp;
167 struct flow_dissector_key_ports ports;
168 struct flow_dissector_key_keyid gre;
169 };
170
171 static struct flow_dissector xfrm_session_dissector __ro_after_init;
172
173 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
174 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
175
176 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
177 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
178 __read_mostly;
179
180 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
181
182 static struct rhashtable xfrm_policy_inexact_table;
183 static const struct rhashtable_params xfrm_pol_inexact_params;
184
185 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
186 static int stale_bundle(struct dst_entry *dst);
187 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
188 static void xfrm_policy_queue_process(struct timer_list *t);
189
190 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
191 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
192 int dir);
193
194 static struct xfrm_pol_inexact_bin *
195 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
196 u32 if_id);
197
198 static struct xfrm_pol_inexact_bin *
199 xfrm_policy_inexact_lookup_rcu(struct net *net,
200 u8 type, u16 family, u8 dir, u32 if_id);
201 static struct xfrm_policy *
202 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
203 bool excl);
204
205 static bool
206 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
207 struct xfrm_pol_inexact_bin *b,
208 const xfrm_address_t *saddr,
209 const xfrm_address_t *daddr);
210
211 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
212 {
213 return refcount_inc_not_zero(&policy->refcnt);
214 }
215
216 static inline bool
217 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
218 {
219 const struct flowi4 *fl4 = &fl->u.ip4;
220
221 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
222 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
223 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
224 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
225 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
226 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
227 }
228
229 static inline bool
230 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
231 {
232 const struct flowi6 *fl6 = &fl->u.ip6;
233
234 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
235 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
236 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
237 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
238 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
239 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
240 }
241
242 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
243 unsigned short family)
244 {
245 switch (family) {
246 case AF_INET:
247 return __xfrm4_selector_match(sel, fl);
248 case AF_INET6:
249 return __xfrm6_selector_match(sel, fl);
250 }
251 return false;
252 }
253
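/* Look up the per-family policy ops.  On success this returns with
 * rcu_read_lock() held; the caller must rcu_read_unlock() once it is
 * done with the returned afinfo (see __xfrm_dst_lookup()).
 */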
254 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
255 {
256 const struct xfrm_policy_afinfo *afinfo;
257
258 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
259 return NULL;
260 rcu_read_lock();
261 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
262 if (unlikely(!afinfo))
263 rcu_read_unlock();
264 return afinfo;
265 }
266
267 /* Called with rcu_read_lock(). */
268 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
269 {
270 return rcu_dereference(xfrm_if_cb);
271 }
272
273 struct dst_entry *__xfrm_dst_lookup(int family,
274 const struct xfrm_dst_lookup_params *params)
275 {
276 const struct xfrm_policy_afinfo *afinfo;
277 struct dst_entry *dst;
278
279 afinfo = xfrm_policy_get_afinfo(family);
280 if (unlikely(afinfo == NULL))
281 return ERR_PTR(-EAFNOSUPPORT);
282
283 dst = afinfo->dst_lookup(params);
284
285 rcu_read_unlock();
286
287 return dst;
288 }
289 EXPORT_SYMBOL(__xfrm_dst_lookup);
290
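/* Resolve the route towards the other end of state x.  Mobile IPv6
 * care-of addresses (XFRM_TYPE_*_COADDR) and ESP-in-UDP/TCP
 * encapsulation ports are folded into the lookup parameters, and the
 * addresses actually used are copied back to prev_saddr/prev_daddr.
 */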
291 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
292 int tos, int oif,
293 xfrm_address_t *prev_saddr,
294 xfrm_address_t *prev_daddr,
295 int family, u32 mark)
296 {
297 struct xfrm_dst_lookup_params params;
298 struct net *net = xs_net(x);
299 xfrm_address_t *saddr = &x->props.saddr;
300 xfrm_address_t *daddr = &x->id.daddr;
301 struct dst_entry *dst;
302
303 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
304 saddr = x->coaddr;
305 daddr = prev_daddr;
306 }
307 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
308 saddr = prev_saddr;
309 daddr = x->coaddr;
310 }
311
312 params.net = net;
313 params.saddr = saddr;
314 params.daddr = daddr;
315 params.tos = tos;
316 params.oif = oif;
317 params.mark = mark;
318 params.ipproto = x->id.proto;
319 if (x->encap) {
320 switch (x->encap->encap_type) {
321 case UDP_ENCAP_ESPINUDP:
322 params.ipproto = IPPROTO_UDP;
323 params.uli.ports.sport = x->encap->encap_sport;
324 params.uli.ports.dport = x->encap->encap_dport;
325 break;
326 case TCP_ENCAP_ESPINTCP:
327 params.ipproto = IPPROTO_TCP;
328 params.uli.ports.sport = x->encap->encap_sport;
329 params.uli.ports.dport = x->encap->encap_dport;
330 break;
331 }
332 }
333
334 dst = __xfrm_dst_lookup(family, &params);
335
336 if (!IS_ERR(dst)) {
337 if (prev_saddr != saddr)
338 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
339 if (prev_daddr != daddr)
340 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
341 }
342
343 return dst;
344 }
345
346 static inline unsigned long make_jiffies(long secs)
347 {
348 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
349 return MAX_SCHEDULE_TIMEOUT-1;
350 else
351 return secs*HZ;
352 }
353
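/* Per-policy lifetime timer: soft add/use expiry notifies the key
 * manager via km_policy_expired() and re-arms the timer, hard expiry
 * deletes the policy.  The reference held for the running timer is
 * dropped before returning.
 */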
354 static void xfrm_policy_timer(struct timer_list *t)
355 {
356 struct xfrm_policy *xp = from_timer(xp, t, timer);
357 time64_t now = ktime_get_real_seconds();
358 time64_t next = TIME64_MAX;
359 int warn = 0;
360 int dir;
361
362 read_lock(&xp->lock);
363
364 if (unlikely(xp->walk.dead))
365 goto out;
366
367 dir = xfrm_policy_id2dir(xp->index);
368
369 if (xp->lft.hard_add_expires_seconds) {
370 time64_t tmo = xp->lft.hard_add_expires_seconds +
371 xp->curlft.add_time - now;
372 if (tmo <= 0)
373 goto expired;
374 if (tmo < next)
375 next = tmo;
376 }
377 if (xp->lft.hard_use_expires_seconds) {
378 time64_t tmo = xp->lft.hard_use_expires_seconds +
379 (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
380 if (tmo <= 0)
381 goto expired;
382 if (tmo < next)
383 next = tmo;
384 }
385 if (xp->lft.soft_add_expires_seconds) {
386 time64_t tmo = xp->lft.soft_add_expires_seconds +
387 xp->curlft.add_time - now;
388 if (tmo <= 0) {
389 warn = 1;
390 tmo = XFRM_KM_TIMEOUT;
391 }
392 if (tmo < next)
393 next = tmo;
394 }
395 if (xp->lft.soft_use_expires_seconds) {
396 time64_t tmo = xp->lft.soft_use_expires_seconds +
397 (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
398 if (tmo <= 0) {
399 warn = 1;
400 tmo = XFRM_KM_TIMEOUT;
401 }
402 if (tmo < next)
403 next = tmo;
404 }
405
406 if (warn)
407 km_policy_expired(xp, dir, 0, 0);
408 if (next != TIME64_MAX &&
409 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
410 xfrm_pol_hold(xp);
411
412 out:
413 read_unlock(&xp->lock);
414 xfrm_pol_put(xp);
415 return;
416
417 expired:
418 read_unlock(&xp->lock);
419 if (!xfrm_policy_delete(xp, dir))
420 km_policy_expired(xp, dir, 1, 0);
421 xfrm_pol_put(xp);
422 }
423
424 /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
425 * SPD calls.
426 */
427
428 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
429 {
430 struct xfrm_policy *policy;
431
432 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
433
434 if (policy) {
435 write_pnet(&policy->xp_net, net);
436 INIT_LIST_HEAD(&policy->walk.all);
437 INIT_HLIST_HEAD(&policy->state_cache_list);
438 INIT_HLIST_NODE(&policy->bydst);
439 INIT_HLIST_NODE(&policy->byidx);
440 rwlock_init(&policy->lock);
441 refcount_set(&policy->refcnt, 1);
442 skb_queue_head_init(&policy->polq.hold_queue);
443 timer_setup(&policy->timer, xfrm_policy_timer, 0);
444 timer_setup(&policy->polq.hold_timer,
445 xfrm_policy_queue_process, 0);
446 }
447 return policy;
448 }
449 EXPORT_SYMBOL(xfrm_policy_alloc);
450
451 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
452 {
453 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
454
455 security_xfrm_policy_free(policy->security);
456 kfree(policy);
457 }
458
459 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
460
461 void xfrm_policy_destroy(struct xfrm_policy *policy)
462 {
463 BUG_ON(!policy->walk.dead);
464
465 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
466 BUG();
467
468 xfrm_dev_policy_free(policy);
469 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
470 }
471 EXPORT_SYMBOL(xfrm_policy_destroy);
472
473 /* Rule must be locked. Release descendant resources, announce the
474 * entry dead. The rule must already be unlinked from lists at this point.
475 */
476
477 static void xfrm_policy_kill(struct xfrm_policy *policy)
478 {
479 struct net *net = xp_net(policy);
480 struct xfrm_state *x;
481
482 xfrm_dev_policy_delete(policy);
483
484 write_lock_bh(&policy->lock);
485 policy->walk.dead = 1;
486 write_unlock_bh(&policy->lock);
487
488 atomic_inc(&policy->genid);
489
490 if (del_timer(&policy->polq.hold_timer))
491 xfrm_pol_put(policy);
492 skb_queue_purge(&policy->polq.hold_queue);
493
494 if (del_timer(&policy->timer))
495 xfrm_pol_put(policy);
496
497 /* XXX: Flush state cache */
498 spin_lock_bh(&net->xfrm.xfrm_state_lock);
499 hlist_for_each_entry_rcu(x, &policy->state_cache_list, state_cache) {
500 hlist_del_init_rcu(&x->state_cache);
501 }
502 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
503
504 xfrm_pol_put(policy);
505 }
506
507 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
508
509 static inline unsigned int idx_hash(struct net *net, u32 index)
510 {
511 return __idx_hash(index, net->xfrm.policy_idx_hmask);
512 }
513
514 /* calculate policy hash thresholds */
515 static void __get_hash_thresh(struct net *net,
516 unsigned short family, int dir,
517 u8 *dbits, u8 *sbits)
518 {
519 switch (family) {
520 case AF_INET:
521 *dbits = net->xfrm.policy_bydst[dir].dbits4;
522 *sbits = net->xfrm.policy_bydst[dir].sbits4;
523 break;
524
525 case AF_INET6:
526 *dbits = net->xfrm.policy_bydst[dir].dbits6;
527 *sbits = net->xfrm.policy_bydst[dir].sbits6;
528 break;
529
530 default:
531 *dbits = 0;
532 *sbits = 0;
533 }
534 }
535
536 static struct hlist_head *policy_hash_bysel(struct net *net,
537 const struct xfrm_selector *sel,
538 unsigned short family, int dir)
539 {
540 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
541 unsigned int hash;
542 u8 dbits;
543 u8 sbits;
544
545 __get_hash_thresh(net, family, dir, &dbits, &sbits);
546 hash = __sel_hash(sel, family, hmask, dbits, sbits);
547
548 if (hash == hmask + 1)
549 return NULL;
550
551 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
552 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
553 }
554
555 static struct hlist_head *policy_hash_direct(struct net *net,
556 const xfrm_address_t *daddr,
557 const xfrm_address_t *saddr,
558 unsigned short family, int dir)
559 {
560 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
561 unsigned int hash;
562 u8 dbits;
563 u8 sbits;
564
565 __get_hash_thresh(net, family, dir, &dbits, &sbits);
566 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
567
568 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
569 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
570 }
571
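/* Move every policy on one bydst chain into the new hash table.
 * Relative order inside a bucket is kept by chaining each entry behind
 * the previously moved one; packet-offload policies always go to the
 * head of their new bucket.  The list is re-walked until it is empty.
 */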
572 static void xfrm_dst_hash_transfer(struct net *net,
573 struct hlist_head *list,
574 struct hlist_head *ndsttable,
575 unsigned int nhashmask,
576 int dir)
577 {
578 struct hlist_node *tmp, *entry0 = NULL;
579 struct xfrm_policy *pol;
580 unsigned int h0 = 0;
581 u8 dbits;
582 u8 sbits;
583
584 redo:
585 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
586 unsigned int h;
587
588 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
589 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
590 pol->family, nhashmask, dbits, sbits);
591 if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
592 hlist_del_rcu(&pol->bydst);
593 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
594 h0 = h;
595 } else {
596 if (h != h0)
597 continue;
598 hlist_del_rcu(&pol->bydst);
599 hlist_add_behind_rcu(&pol->bydst, entry0);
600 }
601 entry0 = &pol->bydst;
602 }
603 if (!hlist_empty(list)) {
604 entry0 = NULL;
605 goto redo;
606 }
607 }
608
609 static void xfrm_idx_hash_transfer(struct hlist_head *list,
610 struct hlist_head *nidxtable,
611 unsigned int nhashmask)
612 {
613 struct hlist_node *tmp;
614 struct xfrm_policy *pol;
615
616 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
617 unsigned int h;
618
619 h = __idx_hash(pol->index, nhashmask);
620 hlist_add_head(&pol->byidx, nidxtable+h);
621 }
622 }
623
624 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
625 {
626 return ((old_hmask + 1) << 1) - 1;
627 }
628
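/* Double the bydst hash table for one direction and rehash all its
 * policies.  Writers are excluded by xfrm_policy_lock; concurrent
 * lookups notice the resize through the policy_hash_generation
 * seqcount, and the old table is freed only after an RCU grace period.
 */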
629 static void xfrm_bydst_resize(struct net *net, int dir)
630 {
631 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
632 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
633 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
634 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
635 struct hlist_head *odst;
636 int i;
637
638 if (!ndst)
639 return;
640
641 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
642 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
643
644 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
645 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
646
647 for (i = hmask; i >= 0; i--)
648 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
649
650 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
651 net->xfrm.policy_bydst[dir].hmask = nhashmask;
652
653 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
654 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
655
656 synchronize_rcu();
657
658 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
659 }
660
661 static void xfrm_byidx_resize(struct net *net)
662 {
663 unsigned int hmask = net->xfrm.policy_idx_hmask;
664 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
665 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
666 struct hlist_head *oidx = net->xfrm.policy_byidx;
667 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
668 int i;
669
670 if (!nidx)
671 return;
672
673 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
674
675 for (i = hmask; i >= 0; i--)
676 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
677
678 net->xfrm.policy_byidx = nidx;
679 net->xfrm.policy_idx_hmask = nhashmask;
680
681 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
682
683 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
684 }
685
686 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
687 {
688 unsigned int cnt = net->xfrm.policy_count[dir];
689 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
690
691 if (total)
692 *total += cnt;
693
694 if ((hmask + 1) < xfrm_policy_hashmax &&
695 cnt > hmask)
696 return 1;
697
698 return 0;
699 }
700
701 static inline int xfrm_byidx_should_resize(struct net *net, int total)
702 {
703 unsigned int hmask = net->xfrm.policy_idx_hmask;
704
705 if ((hmask + 1) < xfrm_policy_hashmax &&
706 total > hmask)
707 return 1;
708
709 return 0;
710 }
711
712 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
713 {
714 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
715 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
716 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
717 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
718 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
719 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
720 si->spdhcnt = net->xfrm.policy_idx_hmask;
721 si->spdhmcnt = xfrm_policy_hashmax;
722 }
723 EXPORT_SYMBOL(xfrm_spd_getinfo);
724
725 static DEFINE_MUTEX(hash_resize_mutex);
726 static void xfrm_hash_resize(struct work_struct *work)
727 {
728 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
729 int dir, total;
730
731 mutex_lock(&hash_resize_mutex);
732
733 total = 0;
734 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
735 if (xfrm_bydst_should_resize(net, dir, &total))
736 xfrm_bydst_resize(net, dir);
737 }
738 if (xfrm_byidx_should_resize(net, total))
739 xfrm_byidx_resize(net);
740
741 mutex_unlock(&hash_resize_mutex);
742 }
743
744 /* Make sure *pol can be inserted into fastbin.
745 * Useful to check that later insert requests will be successful
746 * (provided xfrm_policy_lock is held throughout).
747 */
748 static struct xfrm_pol_inexact_bin *
749 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
750 {
751 struct xfrm_pol_inexact_bin *bin, *prev;
752 struct xfrm_pol_inexact_key k = {
753 .family = pol->family,
754 .type = pol->type,
755 .dir = dir,
756 .if_id = pol->if_id,
757 };
758 struct net *net = xp_net(pol);
759
760 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
761
762 write_pnet(&k.net, net);
763 bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
764 xfrm_pol_inexact_params);
765 if (bin)
766 return bin;
767
768 bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
769 if (!bin)
770 return NULL;
771
772 bin->k = k;
773 INIT_HLIST_HEAD(&bin->hhead);
774 bin->root_d = RB_ROOT;
775 bin->root_s = RB_ROOT;
776 seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
777
778 prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
779 &bin->k, &bin->head,
780 xfrm_pol_inexact_params);
781 if (!prev) {
782 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
783 return bin;
784 }
785
786 kfree(bin);
787
788 return IS_ERR(prev) ? NULL : prev;
789 }
790
791 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
792 int family, u8 prefixlen)
793 {
794 if (xfrm_addr_any(addr, family))
795 return true;
796
797 if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
798 return true;
799
800 if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
801 return true;
802
803 return false;
804 }
805
806 static bool
807 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
808 {
809 const xfrm_address_t *addr;
810 bool saddr_any, daddr_any;
811 u8 prefixlen;
812
813 addr = &policy->selector.saddr;
814 prefixlen = policy->selector.prefixlen_s;
815
816 saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
817 policy->family,
818 prefixlen);
819 addr = &policy->selector.daddr;
820 prefixlen = policy->selector.prefixlen_d;
821 daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
822 policy->family,
823 prefixlen);
824 return saddr_any && daddr_any;
825 }
826
827 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
828 const xfrm_address_t *addr, u8 prefixlen)
829 {
830 node->addr = *addr;
831 node->prefixlen = prefixlen;
832 }
833
834 static struct xfrm_pol_inexact_node *
835 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
836 {
837 struct xfrm_pol_inexact_node *node;
838
839 node = kzalloc(sizeof(*node), GFP_ATOMIC);
840 if (node)
841 xfrm_pol_inexact_node_init(node, addr, prefixlen);
842
843 return node;
844 }
845
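/* Compare two addresses under the given prefix length.  Returns a
 * negative value, zero or a positive value, so it can serve both as an
 * rbtree ordering function and as a prefix match test (delta == 0).
 */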
846 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
847 const xfrm_address_t *b,
848 u8 prefixlen, u16 family)
849 {
850 u32 ma, mb, mask;
851 unsigned int pdw, pbi;
852 int delta = 0;
853
854 switch (family) {
855 case AF_INET:
856 if (prefixlen == 0)
857 return 0;
858 mask = ~0U << (32 - prefixlen);
859 ma = ntohl(a->a4) & mask;
860 mb = ntohl(b->a4) & mask;
861 if (ma < mb)
862 delta = -1;
863 else if (ma > mb)
864 delta = 1;
865 break;
866 case AF_INET6:
867 pdw = prefixlen >> 5;
868 pbi = prefixlen & 0x1f;
869
870 if (pdw) {
871 delta = memcmp(a->a6, b->a6, pdw << 2);
872 if (delta)
873 return delta;
874 }
875 if (pbi) {
876 mask = ~0U << (32 - pbi);
877 ma = ntohl(a->a6[pdw]) & mask;
878 mb = ntohl(b->a6[pdw]) & mask;
879 if (ma < mb)
880 delta = -1;
881 else if (ma > mb)
882 delta = 1;
883 }
884 break;
885 default:
886 break;
887 }
888
889 return delta;
890 }
891
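/* Re-add every policy flagged with bydst_reinsert to node n, keeping
 * the chain ordered by priority and, for equal priorities, by original
 * position (pos).  Packet-offload policies are placed at the head.
 */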
892 static void xfrm_policy_inexact_list_reinsert(struct net *net,
893 struct xfrm_pol_inexact_node *n,
894 u16 family)
895 {
896 unsigned int matched_s, matched_d;
897 struct xfrm_policy *policy, *p;
898
899 matched_s = 0;
900 matched_d = 0;
901
902 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
903 struct hlist_node *newpos = NULL;
904 bool matches_s, matches_d;
905
906 if (policy->walk.dead || !policy->bydst_reinsert)
907 continue;
908
909 WARN_ON_ONCE(policy->family != family);
910
911 policy->bydst_reinsert = false;
912 hlist_for_each_entry(p, &n->hhead, bydst) {
913 if (policy->priority > p->priority)
914 newpos = &p->bydst;
915 else if (policy->priority == p->priority &&
916 policy->pos > p->pos)
917 newpos = &p->bydst;
918 else
919 break;
920 }
921
922 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
923 hlist_add_behind_rcu(&policy->bydst, newpos);
924 else
925 hlist_add_head_rcu(&policy->bydst, &n->hhead);
926
927 /* paranoia checks follow.
928 * Check that the reinserted policy matches at least
929 * saddr or daddr for current node prefix.
930 *
931 * Matching both is fine, matching saddr in one policy
932 * (but not daddr) and then matching only daddr in another
933 * is a bug.
934 */
935 matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
936 &n->addr,
937 n->prefixlen,
938 family) == 0;
939 matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
940 &n->addr,
941 n->prefixlen,
942 family) == 0;
943 if (matches_s && matches_d)
944 continue;
945
946 WARN_ON_ONCE(!matches_s && !matches_d);
947 if (matches_s)
948 matched_s++;
949 if (matches_d)
950 matched_d++;
951 WARN_ON_ONCE(matched_s && matched_d);
952 }
953 }
954
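/* Insert node n into the rb-tree 'new'.  If an existing node matches
 * under the shorter of the two prefix lengths, n's policies are merged
 * into that node (shrinking its prefix if necessary) and n is freed.
 */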
955 static void xfrm_policy_inexact_node_reinsert(struct net *net,
956 struct xfrm_pol_inexact_node *n,
957 struct rb_root *new,
958 u16 family)
959 {
960 struct xfrm_pol_inexact_node *node;
961 struct rb_node **p, *parent;
962
963 /* we should not have another subtree here */
964 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
965 restart:
966 parent = NULL;
967 p = &new->rb_node;
968 while (*p) {
969 u8 prefixlen;
970 int delta;
971
972 parent = *p;
973 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
974
975 prefixlen = min(node->prefixlen, n->prefixlen);
976
977 delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
978 prefixlen, family);
979 if (delta < 0) {
980 p = &parent->rb_left;
981 } else if (delta > 0) {
982 p = &parent->rb_right;
983 } else {
984 bool same_prefixlen = node->prefixlen == n->prefixlen;
985 struct xfrm_policy *tmp;
986
987 hlist_for_each_entry(tmp, &n->hhead, bydst) {
988 tmp->bydst_reinsert = true;
989 hlist_del_rcu(&tmp->bydst);
990 }
991
992 node->prefixlen = prefixlen;
993
994 xfrm_policy_inexact_list_reinsert(net, node, family);
995
996 if (same_prefixlen) {
997 kfree_rcu(n, rcu);
998 return;
999 }
1000
1001 rb_erase(*p, new);
1002 kfree_rcu(n, rcu);
1003 n = node;
1004 goto restart;
1005 }
1006 }
1007
1008 rb_link_node_rcu(&n->node, parent, p);
1009 rb_insert_color(&n->node, new);
1010 }
1011
1012 /* merge nodes v and n */
1013 static void xfrm_policy_inexact_node_merge(struct net *net,
1014 struct xfrm_pol_inexact_node *v,
1015 struct xfrm_pol_inexact_node *n,
1016 u16 family)
1017 {
1018 struct xfrm_pol_inexact_node *node;
1019 struct xfrm_policy *tmp;
1020 struct rb_node *rnode;
1021
1022 /* To-be-merged node v has a subtree.
1023 *
1024 * Dismantle it and insert its nodes to n->root.
1025 */
1026 while ((rnode = rb_first(&v->root)) != NULL) {
1027 node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
1028 rb_erase(&node->node, &v->root);
1029 xfrm_policy_inexact_node_reinsert(net, node, &n->root,
1030 family);
1031 }
1032
1033 hlist_for_each_entry(tmp, &v->hhead, bydst) {
1034 tmp->bydst_reinsert = true;
1035 hlist_del_rcu(&tmp->bydst);
1036 }
1037
1038 xfrm_policy_inexact_list_reinsert(net, n, family);
1039 }
1040
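/* Find or create the tree node for addr/prefixlen in 'root'.  Existing
 * nodes that fall inside the new, shorter prefix are removed and their
 * policies merged into the node that is finally inserted.
 */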
1041 static struct xfrm_pol_inexact_node *
1042 xfrm_policy_inexact_insert_node(struct net *net,
1043 struct rb_root *root,
1044 xfrm_address_t *addr,
1045 u16 family, u8 prefixlen, u8 dir)
1046 {
1047 struct xfrm_pol_inexact_node *cached = NULL;
1048 struct rb_node **p, *parent = NULL;
1049 struct xfrm_pol_inexact_node *node;
1050
1051 p = &root->rb_node;
1052 while (*p) {
1053 int delta;
1054
1055 parent = *p;
1056 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1057
1058 delta = xfrm_policy_addr_delta(addr, &node->addr,
1059 node->prefixlen,
1060 family);
1061 if (delta == 0 && prefixlen >= node->prefixlen) {
1062 WARN_ON_ONCE(cached); /* ipsec policies got lost */
1063 return node;
1064 }
1065
1066 if (delta < 0)
1067 p = &parent->rb_left;
1068 else
1069 p = &parent->rb_right;
1070
1071 if (prefixlen < node->prefixlen) {
1072 delta = xfrm_policy_addr_delta(addr, &node->addr,
1073 prefixlen,
1074 family);
1075 if (delta)
1076 continue;
1077
1078 /* This node is a subnet of the new prefix. It needs
1079 * to be removed and re-inserted with the smaller
1080 * prefix and all nodes that are now also covered
1081 * by the reduced prefixlen.
1082 */
1083 rb_erase(&node->node, root);
1084
1085 if (!cached) {
1086 xfrm_pol_inexact_node_init(node, addr,
1087 prefixlen);
1088 cached = node;
1089 } else {
1090 /* This node also falls within the new
1091 * prefixlen. Merge the to-be-reinserted
1092 * node and this one.
1093 */
1094 xfrm_policy_inexact_node_merge(net, node,
1095 cached, family);
1096 kfree_rcu(node, rcu);
1097 }
1098
1099 /* restart */
1100 p = &root->rb_node;
1101 parent = NULL;
1102 }
1103 }
1104
1105 node = cached;
1106 if (!node) {
1107 node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1108 if (!node)
1109 return NULL;
1110 }
1111
1112 rb_link_node_rcu(&node->node, parent, p);
1113 rb_insert_color(&node->node, root);
1114
1115 return node;
1116 }
1117
1118 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1119 {
1120 struct xfrm_pol_inexact_node *node;
1121 struct rb_node *rn = rb_first(r);
1122
1123 while (rn) {
1124 node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1125
1126 xfrm_policy_inexact_gc_tree(&node->root, rm);
1127 rn = rb_next(rn);
1128
1129 if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1130 WARN_ON_ONCE(rm);
1131 continue;
1132 }
1133
1134 rb_erase(&node->node, r);
1135 kfree_rcu(node, rcu);
1136 }
1137 }
1138
1139 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1140 {
1141 write_seqcount_begin(&b->count);
1142 xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1143 xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1144 write_seqcount_end(&b->count);
1145
1146 if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1147 !hlist_empty(&b->hhead)) {
1148 WARN_ON_ONCE(net_exit);
1149 return;
1150 }
1151
1152 if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1153 xfrm_pol_inexact_params) == 0) {
1154 list_del(&b->inexact_bins);
1155 kfree_rcu(b, rcu);
1156 }
1157 }
1158
1159 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1160 {
1161 struct net *net = read_pnet(&b->k.net);
1162
1163 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1164 __xfrm_policy_inexact_prune_bin(b, false);
1165 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1166 }
1167
1168 static void __xfrm_policy_inexact_flush(struct net *net)
1169 {
1170 struct xfrm_pol_inexact_bin *bin, *t;
1171
1172 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1173
1174 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1175 __xfrm_policy_inexact_prune_bin(bin, false);
1176 }
1177
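/* Pick the hlist a new inexact policy should live on: the bin's any:any
 * list, a node in the saddr or daddr tree, or a saddr subtree node
 * hanging off a daddr node, depending on which selector addresses are
 * wildcards (or have prefixes below the INEXACT_PREFIXLEN_* limits).
 */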
1178 static struct hlist_head *
1179 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1180 struct xfrm_policy *policy, u8 dir)
1181 {
1182 struct xfrm_pol_inexact_node *n;
1183 struct net *net;
1184
1185 net = xp_net(policy);
1186 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1187
1188 if (xfrm_policy_inexact_insert_use_any_list(policy))
1189 return &bin->hhead;
1190
1191 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1192 policy->family,
1193 policy->selector.prefixlen_d)) {
1194 write_seqcount_begin(&bin->count);
1195 n = xfrm_policy_inexact_insert_node(net,
1196 &bin->root_s,
1197 &policy->selector.saddr,
1198 policy->family,
1199 policy->selector.prefixlen_s,
1200 dir);
1201 write_seqcount_end(&bin->count);
1202 if (!n)
1203 return NULL;
1204
1205 return &n->hhead;
1206 }
1207
1208 /* daddr is fixed */
1209 write_seqcount_begin(&bin->count);
1210 n = xfrm_policy_inexact_insert_node(net,
1211 &bin->root_d,
1212 &policy->selector.daddr,
1213 policy->family,
1214 policy->selector.prefixlen_d, dir);
1215 write_seqcount_end(&bin->count);
1216 if (!n)
1217 return NULL;
1218
1219 /* saddr is wildcard */
1220 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1221 policy->family,
1222 policy->selector.prefixlen_s))
1223 return &n->hhead;
1224
1225 write_seqcount_begin(&bin->count);
1226 n = xfrm_policy_inexact_insert_node(net,
1227 &n->root,
1228 &policy->selector.saddr,
1229 policy->family,
1230 policy->selector.prefixlen_s, dir);
1231 write_seqcount_end(&bin->count);
1232 if (!n)
1233 return NULL;
1234
1235 return &n->hhead;
1236 }
1237
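/* Insert a policy that cannot go into the exact bydst hash: find or
 * create its bin and chain, then add it there.  Returns the policy it
 * replaces, NULL, or an ERR_PTR() on failure.
 */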
1238 static struct xfrm_policy *
1239 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1240 {
1241 struct xfrm_pol_inexact_bin *bin;
1242 struct xfrm_policy *delpol;
1243 struct hlist_head *chain;
1244 struct net *net;
1245
1246 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1247 if (!bin)
1248 return ERR_PTR(-ENOMEM);
1249
1250 net = xp_net(policy);
1251 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1252
1253 chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1254 if (!chain) {
1255 __xfrm_policy_inexact_prune_bin(bin, false);
1256 return ERR_PTR(-ENOMEM);
1257 }
1258
1259 delpol = xfrm_policy_insert_list(chain, policy, excl);
1260 if (delpol && excl) {
1261 __xfrm_policy_inexact_prune_bin(bin, false);
1262 return ERR_PTR(-EEXIST);
1263 }
1264
1265 if (delpol)
1266 __xfrm_policy_inexact_prune_bin(bin, false);
1267
1268 return delpol;
1269 }
1270
1271 static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
1272 {
1273 int dir;
1274
1275 if (policy->walk.dead)
1276 return true;
1277
1278 dir = xfrm_policy_id2dir(policy->index);
1279 return dir >= XFRM_POLICY_MAX;
1280 }
1281
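/* Rebuild the bydst hash tables after the selector prefix-length
 * thresholds changed.  Inexact bins and chains are pre-allocated first
 * so that the destructive re-insertion below cannot fail, then every
 * policy is re-inserted in order of creation.
 */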
1282 static void xfrm_hash_rebuild(struct work_struct *work)
1283 {
1284 struct net *net = container_of(work, struct net,
1285 xfrm.policy_hthresh.work);
1286 struct xfrm_policy *pol;
1287 struct xfrm_policy *policy;
1288 struct hlist_head *chain;
1289 struct hlist_node *newpos;
1290 int dir;
1291 unsigned seq;
1292 u8 lbits4, rbits4, lbits6, rbits6;
1293
1294 mutex_lock(&hash_resize_mutex);
1295
1296 /* read selector prefixlen thresholds */
1297 do {
1298 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1299
1300 lbits4 = net->xfrm.policy_hthresh.lbits4;
1301 rbits4 = net->xfrm.policy_hthresh.rbits4;
1302 lbits6 = net->xfrm.policy_hthresh.lbits6;
1303 rbits6 = net->xfrm.policy_hthresh.rbits6;
1304 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1305
1306 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1307 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1308
1309 /* make sure that we can insert the indirect policies again before
1310 * we start with destructive action.
1311 */
1312 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1313 struct xfrm_pol_inexact_bin *bin;
1314 u8 dbits, sbits;
1315
1316 if (xfrm_policy_is_dead_or_sk(policy))
1317 continue;
1318
1319 dir = xfrm_policy_id2dir(policy->index);
1320 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1321 if (policy->family == AF_INET) {
1322 dbits = rbits4;
1323 sbits = lbits4;
1324 } else {
1325 dbits = rbits6;
1326 sbits = lbits6;
1327 }
1328 } else {
1329 if (policy->family == AF_INET) {
1330 dbits = lbits4;
1331 sbits = rbits4;
1332 } else {
1333 dbits = lbits6;
1334 sbits = rbits6;
1335 }
1336 }
1337
1338 if (policy->selector.prefixlen_d < dbits ||
1339 policy->selector.prefixlen_s < sbits)
1340 continue;
1341
1342 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1343 if (!bin)
1344 goto out_unlock;
1345
1346 if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1347 goto out_unlock;
1348 }
1349
1350 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1351 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1352 /* dir out => dst = remote, src = local */
1353 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1354 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1355 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1356 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1357 } else {
1358 /* dir in/fwd => dst = local, src = remote */
1359 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1360 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1361 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1362 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1363 }
1364 }
1365
1366 /* re-insert all policies by order of creation */
1367 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1368 if (xfrm_policy_is_dead_or_sk(policy))
1369 continue;
1370
1371 hlist_del_rcu(&policy->bydst);
1372
1373 newpos = NULL;
1374 dir = xfrm_policy_id2dir(policy->index);
1375 chain = policy_hash_bysel(net, &policy->selector,
1376 policy->family, dir);
1377
1378 if (!chain) {
1379 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1380
1381 WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1382 continue;
1383 }
1384
1385 hlist_for_each_entry(pol, chain, bydst) {
1386 if (policy->priority >= pol->priority)
1387 newpos = &pol->bydst;
1388 else
1389 break;
1390 }
1391 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1392 hlist_add_behind_rcu(&policy->bydst, newpos);
1393 else
1394 hlist_add_head_rcu(&policy->bydst, chain);
1395 }
1396
1397 out_unlock:
1398 __xfrm_policy_inexact_flush(net);
1399 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1400 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1401
1402 mutex_unlock(&hash_resize_mutex);
1403 }
1404
1405 void xfrm_policy_hash_rebuild(struct net *net)
1406 {
1407 schedule_work(&net->xfrm.policy_hthresh.work);
1408 }
1409 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1410
1411 /* Generate new index... KAME seems to generate them ordered by cost
1412 * of an absolute unpredictability of ordering of rules. This will not pass. */
1413 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1414 {
1415 for (;;) {
1416 struct hlist_head *list;
1417 struct xfrm_policy *p;
1418 u32 idx;
1419 int found;
1420
1421 if (!index) {
1422 idx = (net->xfrm.idx_generator | dir);
1423 net->xfrm.idx_generator += 8;
1424 } else {
1425 idx = index;
1426 index = 0;
1427 }
1428
1429 if (idx == 0)
1430 idx = 8;
1431 list = net->xfrm.policy_byidx + idx_hash(net, idx);
1432 found = 0;
1433 hlist_for_each_entry(p, list, byidx) {
1434 if (p->index == idx) {
1435 found = 1;
1436 break;
1437 }
1438 }
1439 if (!found)
1440 return idx;
1441 }
1442 }
1443
1444 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1445 {
1446 u32 *p1 = (u32 *) s1;
1447 u32 *p2 = (u32 *) s2;
1448 int len = sizeof(struct xfrm_selector) / sizeof(u32);
1449 int i;
1450
1451 for (i = 0; i < len; i++) {
1452 if (p1[i] != p2[i])
1453 return 1;
1454 }
1455
1456 return 0;
1457 }
1458
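/* An old policy is being replaced: move any packets waiting on its
 * hold queue over to the new policy and kick the new queue timer.
 */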
1459 static void xfrm_policy_requeue(struct xfrm_policy *old,
1460 struct xfrm_policy *new)
1461 {
1462 struct xfrm_policy_queue *pq = &old->polq;
1463 struct sk_buff_head list;
1464
1465 if (skb_queue_empty(&pq->hold_queue))
1466 return;
1467
1468 __skb_queue_head_init(&list);
1469
1470 spin_lock_bh(&pq->hold_queue.lock);
1471 skb_queue_splice_init(&pq->hold_queue, &list);
1472 if (del_timer(&pq->hold_timer))
1473 xfrm_pol_put(old);
1474 spin_unlock_bh(&pq->hold_queue.lock);
1475
1476 pq = &new->polq;
1477
1478 spin_lock_bh(&pq->hold_queue.lock);
1479 skb_queue_splice(&list, &pq->hold_queue);
1480 pq->timeout = XFRM_QUEUE_TMO_MIN;
1481 if (!mod_timer(&pq->hold_timer, jiffies))
1482 xfrm_pol_hold(new);
1483 spin_unlock_bh(&pq->hold_queue.lock);
1484 }
1485
1486 static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1487 struct xfrm_policy *pol)
1488 {
1489 return mark->v == pol->mark.v && mark->m == pol->mark.m;
1490 }
1491
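/* rhashtable callbacks for the inexact-bin table; bins are keyed by
 * (net, if_id, family, dir, type), see struct xfrm_pol_inexact_key.
 */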
1492 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1493 {
1494 const struct xfrm_pol_inexact_key *k = data;
1495 u32 a = k->type << 24 | k->dir << 16 | k->family;
1496
1497 return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1498 seed);
1499 }
1500
1501 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1502 {
1503 const struct xfrm_pol_inexact_bin *b = data;
1504
1505 return xfrm_pol_bin_key(&b->k, 0, seed);
1506 }
1507
1508 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1509 const void *ptr)
1510 {
1511 const struct xfrm_pol_inexact_key *key = arg->key;
1512 const struct xfrm_pol_inexact_bin *b = ptr;
1513 int ret;
1514
1515 if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1516 return -1;
1517
1518 ret = b->k.dir ^ key->dir;
1519 if (ret)
1520 return ret;
1521
1522 ret = b->k.type ^ key->type;
1523 if (ret)
1524 return ret;
1525
1526 ret = b->k.family ^ key->family;
1527 if (ret)
1528 return ret;
1529
1530 return b->k.if_id ^ key->if_id;
1531 }
1532
1533 static const struct rhashtable_params xfrm_pol_inexact_params = {
1534 .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
1535 .hashfn = xfrm_pol_bin_key,
1536 .obj_hashfn = xfrm_pol_bin_obj,
1537 .obj_cmpfn = xfrm_pol_bin_cmp,
1538 .automatic_shrinking = true,
1539 };
1540
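/* Insert a policy into a bydst chain, ordered by priority.  A policy
 * with the same type, if_id, selector, mark and security context is
 * returned so the caller can replace it, or ERR_PTR(-EEXIST) if 'excl'
 * was requested.
 */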
1541 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1542 struct xfrm_policy *policy,
1543 bool excl)
1544 {
1545 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1546
1547 hlist_for_each_entry(pol, chain, bydst) {
1548 if (pol->type == policy->type &&
1549 pol->if_id == policy->if_id &&
1550 !selector_cmp(&pol->selector, &policy->selector) &&
1551 xfrm_policy_mark_match(&policy->mark, pol) &&
1552 xfrm_sec_ctx_match(pol->security, policy->security) &&
1553 !WARN_ON(delpol)) {
1554 if (excl)
1555 return ERR_PTR(-EEXIST);
1556 delpol = pol;
1557 if (policy->priority > pol->priority)
1558 continue;
1559 } else if (policy->priority >= pol->priority) {
1560 newpos = pol;
1561 continue;
1562 }
1563 if (delpol)
1564 break;
1565 }
1566
1567 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1568 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1569 else
1570 /* Packet offload policies go to the head
1571 * to speed up lookups.
1572 */
1573 hlist_add_head_rcu(&policy->bydst, chain);
1574
1575 return delpol;
1576 }
1577
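/* Insert a policy into the SPD.  Exact-hashable selectors go into the
 * bydst hash table, everything else into the inexact lists and trees.
 * An existing policy with the same selector is replaced and its queued
 * packets are handed over to the new policy.
 */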
1578 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1579 {
1580 struct net *net = xp_net(policy);
1581 struct xfrm_policy *delpol;
1582 struct hlist_head *chain;
1583
1584 /* Sanitize mark before store */
1585 policy->mark.v &= policy->mark.m;
1586
1587 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1588 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1589 if (chain)
1590 delpol = xfrm_policy_insert_list(chain, policy, excl);
1591 else
1592 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1593
1594 if (IS_ERR(delpol)) {
1595 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1596 return PTR_ERR(delpol);
1597 }
1598
1599 __xfrm_policy_link(policy, dir);
1600
1601 /* After previous checking, family can either be AF_INET or AF_INET6 */
1602 if (policy->family == AF_INET)
1603 rt_genid_bump_ipv4(net);
1604 else
1605 rt_genid_bump_ipv6(net);
1606
1607 if (delpol) {
1608 xfrm_policy_requeue(delpol, policy);
1609 __xfrm_policy_unlink(delpol, dir);
1610 }
1611 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1612 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1613 policy->curlft.add_time = ktime_get_real_seconds();
1614 policy->curlft.use_time = 0;
1615 if (!mod_timer(&policy->timer, jiffies + HZ))
1616 xfrm_pol_hold(policy);
1617 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1618
1619 if (delpol)
1620 xfrm_policy_kill(delpol);
1621 else if (xfrm_bydst_should_resize(net, dir, NULL))
1622 schedule_work(&net->xfrm.policy_hash_work);
1623
1624 return 0;
1625 }
1626 EXPORT_SYMBOL(xfrm_policy_insert);
1627
1628 static struct xfrm_policy *
1629 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1630 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1631 struct xfrm_sec_ctx *ctx)
1632 {
1633 struct xfrm_policy *pol;
1634
1635 if (!chain)
1636 return NULL;
1637
1638 hlist_for_each_entry(pol, chain, bydst) {
1639 if (pol->type == type &&
1640 pol->if_id == if_id &&
1641 xfrm_policy_mark_match(mark, pol) &&
1642 !selector_cmp(sel, &pol->selector) &&
1643 xfrm_sec_ctx_match(ctx, pol->security))
1644 return pol;
1645 }
1646
1647 return NULL;
1648 }
1649
1650 struct xfrm_policy *
1651 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1652 u8 type, int dir, struct xfrm_selector *sel,
1653 struct xfrm_sec_ctx *ctx, int delete, int *err)
1654 {
1655 struct xfrm_pol_inexact_bin *bin = NULL;
1656 struct xfrm_policy *pol, *ret = NULL;
1657 struct hlist_head *chain;
1658
1659 *err = 0;
1660 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1661 chain = policy_hash_bysel(net, sel, sel->family, dir);
1662 if (!chain) {
1663 struct xfrm_pol_inexact_candidates cand;
1664 int i;
1665
1666 bin = xfrm_policy_inexact_lookup(net, type,
1667 sel->family, dir, if_id);
1668 if (!bin) {
1669 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1670 return NULL;
1671 }
1672
1673 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1674 &sel->saddr,
1675 &sel->daddr)) {
1676 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1677 return NULL;
1678 }
1679
1680 pol = NULL;
1681 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1682 struct xfrm_policy *tmp;
1683
1684 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1685 if_id, type, dir,
1686 sel, ctx);
1687 if (!tmp)
1688 continue;
1689
1690 if (!pol || tmp->pos < pol->pos)
1691 pol = tmp;
1692 }
1693 } else {
1694 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1695 sel, ctx);
1696 }
1697
1698 if (pol) {
1699 xfrm_pol_hold(pol);
1700 if (delete) {
1701 *err = security_xfrm_policy_delete(pol->security);
1702 if (*err) {
1703 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1704 return pol;
1705 }
1706 __xfrm_policy_unlink(pol, dir);
1707 }
1708 ret = pol;
1709 }
1710 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1711
1712 if (ret && delete)
1713 xfrm_policy_kill(ret);
1714 if (bin && delete)
1715 xfrm_policy_inexact_prune_bin(bin);
1716 return ret;
1717 }
1718 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1719
1720 struct xfrm_policy *
1721 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1722 u8 type, int dir, u32 id, int delete, int *err)
1723 {
1724 struct xfrm_policy *pol, *ret;
1725 struct hlist_head *chain;
1726
1727 *err = -ENOENT;
1728 if (xfrm_policy_id2dir(id) != dir)
1729 return NULL;
1730
1731 *err = 0;
1732 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1733 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1734 ret = NULL;
1735 hlist_for_each_entry(pol, chain, byidx) {
1736 if (pol->type == type && pol->index == id &&
1737 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1738 xfrm_pol_hold(pol);
1739 if (delete) {
1740 *err = security_xfrm_policy_delete(
1741 pol->security);
1742 if (*err) {
1743 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1744 return pol;
1745 }
1746 __xfrm_policy_unlink(pol, dir);
1747 }
1748 ret = pol;
1749 break;
1750 }
1751 }
1752 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1753
1754 if (ret && delete)
1755 xfrm_policy_kill(ret);
1756 return ret;
1757 }
1758 EXPORT_SYMBOL(xfrm_policy_byid);
1759
1760 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1761 static inline int
1762 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1763 {
1764 struct xfrm_policy *pol;
1765 int err = 0;
1766
1767 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1768 if (pol->walk.dead ||
1769 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1770 pol->type != type)
1771 continue;
1772
1773 err = security_xfrm_policy_delete(pol->security);
1774 if (err) {
1775 xfrm_audit_policy_delete(pol, 0, task_valid);
1776 return err;
1777 }
1778 }
1779 return err;
1780 }
1781
1782 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1783 struct net_device *dev,
1784 bool task_valid)
1785 {
1786 struct xfrm_policy *pol;
1787 int err = 0;
1788
1789 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1790 if (pol->walk.dead ||
1791 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1792 pol->xdo.dev != dev)
1793 continue;
1794
1795 err = security_xfrm_policy_delete(pol->security);
1796 if (err) {
1797 xfrm_audit_policy_delete(pol, 0, task_valid);
1798 return err;
1799 }
1800 }
1801 return err;
1802 }
1803 #else
1804 static inline int
1805 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1806 {
1807 return 0;
1808 }
1809
1810 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1811 struct net_device *dev,
1812 bool task_valid)
1813 {
1814 return 0;
1815 }
1816 #endif
1817
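/* Delete all policies of the given type, restarting the walk each time
 * the lock has to be dropped to kill an entry.  Returns -ESRCH if
 * nothing was deleted.
 */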
1818 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1819 {
1820 int dir, err = 0, cnt = 0;
1821 struct xfrm_policy *pol;
1822
1823 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1824
1825 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1826 if (err)
1827 goto out;
1828
1829 again:
1830 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1831 if (pol->walk.dead)
1832 continue;
1833
1834 dir = xfrm_policy_id2dir(pol->index);
1835 if (dir >= XFRM_POLICY_MAX ||
1836 pol->type != type)
1837 continue;
1838
1839 __xfrm_policy_unlink(pol, dir);
1840 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1841 cnt++;
1842 xfrm_audit_policy_delete(pol, 1, task_valid);
1843 xfrm_policy_kill(pol);
1844 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1845 goto again;
1846 }
1847 if (cnt)
1848 __xfrm_policy_inexact_flush(net);
1849 else
1850 err = -ESRCH;
1851 out:
1852 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1853 return err;
1854 }
1855 EXPORT_SYMBOL(xfrm_policy_flush);
1856
1857 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1858 bool task_valid)
1859 {
1860 int dir, err = 0, cnt = 0;
1861 struct xfrm_policy *pol;
1862
1863 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1864
1865 err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1866 if (err)
1867 goto out;
1868
1869 again:
1870 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1871 if (pol->walk.dead)
1872 continue;
1873
1874 dir = xfrm_policy_id2dir(pol->index);
1875 if (dir >= XFRM_POLICY_MAX ||
1876 pol->xdo.dev != dev)
1877 continue;
1878
1879 __xfrm_policy_unlink(pol, dir);
1880 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1881 cnt++;
1882 xfrm_audit_policy_delete(pol, 1, task_valid);
1883 xfrm_policy_kill(pol);
1884 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1885 goto again;
1886 }
1887 if (cnt)
1888 __xfrm_policy_inexact_flush(net);
1889 else
1890 err = -ESRCH;
1891 out:
1892 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1893 return err;
1894 }
1895 EXPORT_SYMBOL(xfrm_dev_policy_flush);
1896
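/* Resumable dump of the SPD: the walker keeps its position as a list
 * entry on net->xfrm.policy_all, so a dump interrupted by 'func'
 * returning an error can be continued from the same spot later.
 */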
1897 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1898 int (*func)(struct xfrm_policy *, int, int, void*),
1899 void *data)
1900 {
1901 struct xfrm_policy *pol;
1902 struct xfrm_policy_walk_entry *x;
1903 int error = 0;
1904
1905 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1906 walk->type != XFRM_POLICY_TYPE_ANY)
1907 return -EINVAL;
1908
1909 if (list_empty(&walk->walk.all) && walk->seq != 0)
1910 return 0;
1911
1912 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1913 if (list_empty(&walk->walk.all))
1914 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1915 else
1916 x = list_first_entry(&walk->walk.all,
1917 struct xfrm_policy_walk_entry, all);
1918
1919 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1920 if (x->dead)
1921 continue;
1922 pol = container_of(x, struct xfrm_policy, walk);
1923 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1924 walk->type != pol->type)
1925 continue;
1926 error = func(pol, xfrm_policy_id2dir(pol->index),
1927 walk->seq, data);
1928 if (error) {
1929 list_move_tail(&walk->walk.all, &x->all);
1930 goto out;
1931 }
1932 walk->seq++;
1933 }
1934 if (walk->seq == 0) {
1935 error = -ENOENT;
1936 goto out;
1937 }
1938 list_del_init(&walk->walk.all);
1939 out:
1940 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1941 return error;
1942 }
1943 EXPORT_SYMBOL(xfrm_policy_walk);
1944
1945 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1946 {
1947 INIT_LIST_HEAD(&walk->walk.all);
1948 walk->walk.dead = 1;
1949 walk->type = type;
1950 walk->seq = 0;
1951 }
1952 EXPORT_SYMBOL(xfrm_policy_walk_init);
1953
1954 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1955 {
1956 if (list_empty(&walk->walk.all))
1957 return;
1958
1959 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1960 list_del(&walk->walk.all);
1961 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1962 }
1963 EXPORT_SYMBOL(xfrm_policy_walk_done);
1964
1965 /*
1966 * Find policy to apply to this flow.
1967 *
1968 * Returns 0 if policy found, else an -errno.
1969 */
1970 static int xfrm_policy_match(const struct xfrm_policy *pol,
1971 const struct flowi *fl,
1972 u8 type, u16 family, u32 if_id)
1973 {
1974 const struct xfrm_selector *sel = &pol->selector;
1975 int ret = -ESRCH;
1976 bool match;
1977
1978 if (pol->family != family ||
1979 pol->if_id != if_id ||
1980 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1981 pol->type != type)
1982 return ret;
1983
1984 match = xfrm_selector_match(sel, fl, family);
1985 if (match)
1986 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1987 return ret;
1988 }
1989
1990 static struct xfrm_pol_inexact_node *
1991 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1992 seqcount_spinlock_t *count,
1993 const xfrm_address_t *addr, u16 family)
1994 {
1995 const struct rb_node *parent;
1996 int seq;
1997
1998 again:
1999 seq = read_seqcount_begin(count);
2000
2001 parent = rcu_dereference_raw(r->rb_node);
2002 while (parent) {
2003 struct xfrm_pol_inexact_node *node;
2004 int delta;
2005
2006 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2007
2008 delta = xfrm_policy_addr_delta(addr, &node->addr,
2009 node->prefixlen, family);
2010 if (delta < 0) {
2011 parent = rcu_dereference_raw(parent->rb_left);
2012 continue;
2013 } else if (delta > 0) {
2014 parent = rcu_dereference_raw(parent->rb_right);
2015 continue;
2016 }
2017
2018 return node;
2019 }
2020
2021 if (read_seqcount_retry(count, seq))
2022 goto again;
2023
2024 return NULL;
2025 }
2026
2027 static bool
2028 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2029 struct xfrm_pol_inexact_bin *b,
2030 const xfrm_address_t *saddr,
2031 const xfrm_address_t *daddr)
2032 {
2033 struct xfrm_pol_inexact_node *n;
2034 u16 family;
2035
2036 if (!b)
2037 return false;
2038
2039 family = b->k.family;
2040 memset(cand, 0, sizeof(*cand));
2041 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2042
2043 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2044 family);
2045 if (n) {
2046 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2047 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2048 family);
2049 if (n)
2050 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2051 }
2052
2053 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2054 family);
2055 if (n)
2056 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2057
2058 return true;
2059 }
2060
2061 static struct xfrm_pol_inexact_bin *
2062 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2063 u8 dir, u32 if_id)
2064 {
2065 struct xfrm_pol_inexact_key k = {
2066 .family = family,
2067 .type = type,
2068 .dir = dir,
2069 .if_id = if_id,
2070 };
2071
2072 write_pnet(&k.net, net);
2073
2074 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2075 xfrm_pol_inexact_params);
2076 }
2077
2078 static struct xfrm_pol_inexact_bin *
2079 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2080 u8 dir, u32 if_id)
2081 {
2082 struct xfrm_pol_inexact_bin *bin;
2083
2084 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2085
2086 rcu_read_lock();
2087 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2088 rcu_read_unlock();
2089
2090 return bin;
2091 }
2092
2093 static struct xfrm_policy *
2094 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2095 struct xfrm_policy *prefer,
2096 const struct flowi *fl,
2097 u8 type, u16 family, u32 if_id)
2098 {
2099 u32 priority = prefer ? prefer->priority : ~0u;
2100 struct xfrm_policy *pol;
2101
2102 if (!chain)
2103 return NULL;
2104
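/* Candidate chains are kept ordered by ascending priority, so the
 * search can stop as soon as no remaining entry can beat the current
 * best match.
 */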
2105 hlist_for_each_entry_rcu(pol, chain, bydst) {
2106 int err;
2107
2108 if (pol->priority > priority)
2109 break;
2110
2111 err = xfrm_policy_match(pol, fl, type, family, if_id);
2112 if (err) {
2113 if (err != -ESRCH)
2114 return ERR_PTR(err);
2115
2116 continue;
2117 }
2118
2119 if (prefer) {
2120 /* matches. Is it older than *prefer? */
2121 if (pol->priority == priority &&
2122 prefer->pos < pol->pos)
2123 return prefer;
2124 }
2125
2126 return pol;
2127 }
2128
2129 return NULL;
2130 }
2131
2132 static struct xfrm_policy *
2133 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2134 struct xfrm_policy *prefer,
2135 const struct flowi *fl,
2136 u8 type, u16 family, u32 if_id)
2137 {
2138 struct xfrm_policy *tmp;
2139 int i;
2140
2141 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2142 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2143 prefer,
2144 fl, type, family, if_id);
2145 if (!tmp)
2146 continue;
2147
2148 if (IS_ERR(tmp))
2149 return tmp;
2150 prefer = tmp;
2151 }
2152
2153 return prefer;
2154 }
2155
2156 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2157 const struct flowi *fl,
2158 u16 family, u8 dir,
2159 u32 if_id)
2160 {
2161 struct xfrm_pol_inexact_candidates cand;
2162 const xfrm_address_t *daddr, *saddr;
2163 struct xfrm_pol_inexact_bin *bin;
2164 struct xfrm_policy *pol, *ret;
2165 struct hlist_head *chain;
2166 unsigned int sequence;
2167 int err;
2168
2169 daddr = xfrm_flowi_daddr(fl, family);
2170 saddr = xfrm_flowi_saddr(fl, family);
2171 if (unlikely(!daddr || !saddr))
2172 return NULL;
2173
2174 rcu_read_lock();
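/* The policy hash tables may be resized concurrently; sample the
 * generation seqcount and redo the lookup if it changed meanwhile.
 */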
2175 retry:
2176 do {
2177 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2178 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2179 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2180
2181 ret = NULL;
2182 hlist_for_each_entry_rcu(pol, chain, bydst) {
2183 err = xfrm_policy_match(pol, fl, type, family, if_id);
2184 if (err) {
2185 if (err == -ESRCH)
2186 continue;
2187 else {
2188 ret = ERR_PTR(err);
2189 goto fail;
2190 }
2191 } else {
2192 ret = pol;
2193 break;
2194 }
2195 }
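/* An exact-hash match on a packet hardware-offload policy wins
 * outright; skip the inexact candidate search.
 */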
2196 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2197 goto skip_inexact;
2198
2199 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2200 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2201 daddr))
2202 goto skip_inexact;
2203
2204 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2205 family, if_id);
2206 if (pol) {
2207 ret = pol;
2208 if (IS_ERR(pol))
2209 goto fail;
2210 }
2211
2212 skip_inexact:
2213 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2214 goto retry;
2215
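/* The refcount can already be zero if the policy is being freed;
 * in that case restart the whole lookup.
 */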
2216 if (ret && !xfrm_pol_hold_rcu(ret))
2217 goto retry;
2218 fail:
2219 rcu_read_unlock();
2220
2221 return ret;
2222 }
2223
2224 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2225 const struct flowi *fl,
2226 u16 family, u8 dir, u32 if_id)
2227 {
2228 #ifdef CONFIG_XFRM_SUB_POLICY
2229 struct xfrm_policy *pol;
2230
2231 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2232 dir, if_id);
2233 if (pol != NULL)
2234 return pol;
2235 #endif
2236 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2237 dir, if_id);
2238 }
2239
2240 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2241 const struct flowi *fl,
2242 u16 family, u32 if_id)
2243 {
2244 struct xfrm_policy *pol;
2245
2246 rcu_read_lock();
2247 again:
2248 pol = rcu_dereference(sk->sk_policy[dir]);
2249 if (pol != NULL) {
2250 bool match;
2251 int err = 0;
2252
2253 if (pol->family != family) {
2254 pol = NULL;
2255 goto out;
2256 }
2257
2258 match = xfrm_selector_match(&pol->selector, fl, family);
2259 if (match) {
2260 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2261 pol->if_id != if_id) {
2262 pol = NULL;
2263 goto out;
2264 }
2265 err = security_xfrm_policy_lookup(pol->security,
2266 fl->flowi_secid);
2267 if (!err) {
2268 if (!xfrm_pol_hold_rcu(pol))
2269 goto again;
2270 } else if (err == -ESRCH) {
2271 pol = NULL;
2272 } else {
2273 pol = ERR_PTR(err);
2274 }
2275 } else
2276 pol = NULL;
2277 }
2278 out:
2279 rcu_read_unlock();
2280 return pol;
2281 }
2282
2283 static u32 xfrm_gen_pos_slow(struct net *net)
2284 {
2285 struct xfrm_policy *policy;
2286 u32 i = 0;
2287
2288 /* oldest entry is last in list */
2289 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
2290 if (!xfrm_policy_is_dead_or_sk(policy))
2291 policy->pos = ++i;
2292 }
2293
2294 return i;
2295 }
2296
2297 static u32 xfrm_gen_pos(struct net *net)
2298 {
2299 const struct xfrm_policy *policy;
2300 u32 i = 0;
2301
2302 /* most recently added policy is at the head of the list */
2303 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
2304 if (xfrm_policy_is_dead_or_sk(policy))
2305 continue;
2306
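/* If the newest pos value would overflow, renumber every policy
 * from scratch.
 */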
2307 if (policy->pos == UINT_MAX)
2308 return xfrm_gen_pos_slow(net);
2309
2310 i = policy->pos + 1;
2311 break;
2312 }
2313
2314 return i;
2315 }
2316
2317 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2318 {
2319 struct net *net = xp_net(pol);
2320
2321 switch (dir) {
2322 case XFRM_POLICY_IN:
2323 case XFRM_POLICY_FWD:
2324 case XFRM_POLICY_OUT:
2325 pol->pos = xfrm_gen_pos(net);
2326 break;
2327 }
2328
2329 list_add(&pol->walk.all, &net->xfrm.policy_all);
2330 net->xfrm.policy_count[dir]++;
2331 xfrm_pol_hold(pol);
2332 }
2333
2334 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2335 int dir)
2336 {
2337 struct net *net = xp_net(pol);
2338
2339 if (list_empty(&pol->walk.all))
2340 return NULL;
2341
2342 /* Socket policies are not hashed. */
2343 if (!hlist_unhashed(&pol->bydst)) {
2344 hlist_del_rcu(&pol->bydst);
2345 hlist_del(&pol->byidx);
2346 }
2347
2348 list_del_init(&pol->walk.all);
2349 net->xfrm.policy_count[dir]--;
2350
2351 return pol;
2352 }
2353
2354 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2355 {
2356 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2357 }
2358
2359 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2360 {
2361 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2362 }
2363
2364 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2365 {
2366 struct net *net = xp_net(pol);
2367
2368 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2369 pol = __xfrm_policy_unlink(pol, dir);
2370 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2371 if (pol) {
2372 xfrm_policy_kill(pol);
2373 return 0;
2374 }
2375 return -ENOENT;
2376 }
2377 EXPORT_SYMBOL(xfrm_policy_delete);
2378
2379 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2380 {
2381 struct net *net = sock_net(sk);
2382 struct xfrm_policy *old_pol;
2383
2384 #ifdef CONFIG_XFRM_SUB_POLICY
2385 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2386 return -EINVAL;
2387 #endif
2388
2389 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2390 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2391 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2392 if (pol) {
2393 pol->curlft.add_time = ktime_get_real_seconds();
2394 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2395 xfrm_sk_policy_link(pol, dir);
2396 }
2397 rcu_assign_pointer(sk->sk_policy[dir], pol);
2398 if (old_pol) {
2399 if (pol)
2400 xfrm_policy_requeue(old_pol, pol);
2401
2402 /* Unlinking succeeds always. This is the only function
2403 * allowed to delete or replace socket policy.
2404 */
2405 xfrm_sk_policy_unlink(old_pol, dir);
2406 }
2407 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2408
2409 if (old_pol) {
2410 xfrm_policy_kill(old_pol);
2411 }
2412 return 0;
2413 }
2414
2415 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2416 {
2417 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2418 struct net *net = xp_net(old);
2419
2420 if (newp) {
2421 newp->selector = old->selector;
2422 if (security_xfrm_policy_clone(old->security,
2423 &newp->security)) {
2424 kfree(newp);
2425 return NULL; /* ENOMEM */
2426 }
2427 newp->lft = old->lft;
2428 newp->curlft = old->curlft;
2429 newp->mark = old->mark;
2430 newp->if_id = old->if_id;
2431 newp->action = old->action;
2432 newp->flags = old->flags;
2433 newp->xfrm_nr = old->xfrm_nr;
2434 newp->index = old->index;
2435 newp->type = old->type;
2436 newp->family = old->family;
2437 memcpy(newp->xfrm_vec, old->xfrm_vec,
2438 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2439 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2440 xfrm_sk_policy_link(newp, dir);
2441 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2442 xfrm_pol_put(newp);
2443 }
2444 return newp;
2445 }
2446
2447 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2448 {
2449 const struct xfrm_policy *p;
2450 struct xfrm_policy *np;
2451 int i, ret = 0;
2452
2453 rcu_read_lock();
2454 for (i = 0; i < 2; i++) {
2455 p = rcu_dereference(osk->sk_policy[i]);
2456 if (p) {
2457 np = clone_policy(p, i);
2458 if (unlikely(!np)) {
2459 ret = -ENOMEM;
2460 break;
2461 }
2462 rcu_assign_pointer(sk->sk_policy[i], np);
2463 }
2464 }
2465 rcu_read_unlock();
2466 return ret;
2467 }
2468
2469 static int
2470 xfrm_get_saddr(unsigned short family, xfrm_address_t *saddr,
2471 const struct xfrm_dst_lookup_params *params)
2472 {
2473 int err;
2474 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2475
2476 if (unlikely(afinfo == NULL))
2477 return -EINVAL;
2478 err = afinfo->get_saddr(saddr, params);
2479 rcu_read_unlock();
2480 return err;
2481 }
2482
2483 /* Resolve list of templates for the flow, given policy. */
2484
2485 static int
2486 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2487 struct xfrm_state **xfrm, unsigned short family)
2488 {
2489 struct net *net = xp_net(policy);
2490 int nx;
2491 int i, error;
2492 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2493 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2494 xfrm_address_t tmp;
2495
2496 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2497 struct xfrm_state *x;
2498 xfrm_address_t *remote = daddr;
2499 xfrm_address_t *local = saddr;
2500 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2501
2502 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2503 tmpl->mode == XFRM_MODE_BEET) {
2504 remote = &tmpl->id.daddr;
2505 local = &tmpl->saddr;
2506 if (xfrm_addr_any(local, tmpl->encap_family)) {
2507 struct xfrm_dst_lookup_params params;
2508
2509 memset(&params, 0, sizeof(params));
2510 params.net = net;
2511 params.oif = fl->flowi_oif;
2512 params.daddr = remote;
2513 error = xfrm_get_saddr(tmpl->encap_family, &tmp,
2514 &params);
2515 if (error)
2516 goto fail;
2517 local = &tmp;
2518 }
2519 }
2520
2521 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2522 family, policy->if_id);
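/* A state created with an explicit direction must be an output SA
 * to be usable on this (output) path.
 */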
2523 if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) {
2524 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR);
2525 xfrm_state_put(x);
2526 error = -EINVAL;
2527 goto fail;
2528 }
2529
2530 if (x && x->km.state == XFRM_STATE_VALID) {
2531 xfrm[nx++] = x;
2532 daddr = remote;
2533 saddr = local;
2534 continue;
2535 }
2536 if (x) {
2537 error = (x->km.state == XFRM_STATE_ERROR ?
2538 -EINVAL : -EAGAIN);
2539 xfrm_state_put(x);
2540 } else if (error == -ESRCH) {
2541 error = -EAGAIN;
2542 }
2543
2544 if (!tmpl->optional)
2545 goto fail;
2546 }
2547 return nx;
2548
2549 fail:
2550 for (nx--; nx >= 0; nx--)
2551 xfrm_state_put(xfrm[nx]);
2552 return error;
2553 }
2554
2555 static int
2556 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2557 struct xfrm_state **xfrm, unsigned short family)
2558 {
2559 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2560 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2561 int cnx = 0;
2562 int error;
2563 int ret;
2564 int i;
2565
2566 for (i = 0; i < npols; i++) {
2567 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2568 error = -ENOBUFS;
2569 goto fail;
2570 }
2571
2572 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2573 if (ret < 0) {
2574 error = ret;
2575 goto fail;
2576 } else
2577 cnx += ret;
2578 }
2579
2580 /* found states are sorted for outbound processing */
2581 if (npols > 1)
2582 xfrm_state_sort(xfrm, tpp, cnx, family);
2583
2584 return cnx;
2585
2586 fail:
2587 for (cnx--; cnx >= 0; cnx--)
2588 xfrm_state_put(tpp[cnx]);
2589 return error;
2590
2591 }
2592
2593 static int xfrm_get_tos(const struct flowi *fl, int family)
2594 {
2595 if (family == AF_INET)
2596 return fl->u.ip4.flowi4_tos & INET_DSCP_MASK;
2597
2598 return 0;
2599 }
2600
2601 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2602 {
2603 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2604 struct dst_ops *dst_ops;
2605 struct xfrm_dst *xdst;
2606
2607 if (!afinfo)
2608 return ERR_PTR(-EINVAL);
2609
2610 switch (family) {
2611 case AF_INET:
2612 dst_ops = &net->xfrm.xfrm4_dst_ops;
2613 break;
2614 #if IS_ENABLED(CONFIG_IPV6)
2615 case AF_INET6:
2616 dst_ops = &net->xfrm.xfrm6_dst_ops;
2617 break;
2618 #endif
2619 default:
2620 BUG();
2621 }
2622 xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2623
2624 if (likely(xdst)) {
2625 memset_after(xdst, 0, u.dst);
2626 } else
2627 xdst = ERR_PTR(-ENOBUFS);
2628
2629 rcu_read_unlock();
2630
2631 return xdst;
2632 }
2633
2634 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2635 int nfheader_len)
2636 {
2637 if (dst->ops->family == AF_INET6) {
2638 path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2639 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2640 }
2641 }
2642
2643 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2644 const struct flowi *fl)
2645 {
2646 const struct xfrm_policy_afinfo *afinfo =
2647 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2648 int err;
2649
2650 if (!afinfo)
2651 return -EINVAL;
2652
2653 err = afinfo->fill_dst(xdst, dev, fl);
2654
2655 rcu_read_unlock();
2656
2657 return err;
2658 }
2659
2660
2661 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2662 * all the metrics... Shortly, bundle a bundle.
2663 */
2664
2665 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2666 struct xfrm_state **xfrm,
2667 struct xfrm_dst **bundle,
2668 int nx,
2669 const struct flowi *fl,
2670 struct dst_entry *dst)
2671 {
2672 const struct xfrm_state_afinfo *afinfo;
2673 const struct xfrm_mode *inner_mode;
2674 struct net *net = xp_net(policy);
2675 unsigned long now = jiffies;
2676 struct net_device *dev;
2677 struct xfrm_dst *xdst_prev = NULL;
2678 struct xfrm_dst *xdst0 = NULL;
2679 int i = 0;
2680 int err;
2681 int header_len = 0;
2682 int nfheader_len = 0;
2683 int trailer_len = 0;
2684 int tos;
2685 int family = policy->selector.family;
2686 xfrm_address_t saddr, daddr;
2687
2688 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2689
2690 tos = xfrm_get_tos(fl, family);
2691
2692 dst_hold(dst);
2693
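/* Allocate one xfrm_dst per state and chain them together: each new
 * entry becomes the child of the previous one, and the original route
 * is attached at the tail once the loop completes.
 */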
2694 for (; i < nx; i++) {
2695 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2696 struct dst_entry *dst1 = &xdst->u.dst;
2697
2698 err = PTR_ERR(xdst);
2699 if (IS_ERR(xdst)) {
2700 dst_release(dst);
2701 goto put_states;
2702 }
2703
2704 bundle[i] = xdst;
2705 if (!xdst_prev)
2706 xdst0 = xdst;
2707 else
2708 /* Ref count is taken during xfrm_alloc_dst()
2709 * No need to do dst_clone() on dst1
2710 */
2711 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2712
2713 if (xfrm[i]->sel.family == AF_UNSPEC) {
2714 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2715 xfrm_af2proto(family));
2716 if (!inner_mode) {
2717 err = -EAFNOSUPPORT;
2718 dst_release(dst);
2719 goto put_states;
2720 }
2721 } else
2722 inner_mode = &xfrm[i]->inner_mode;
2723
2724 xdst->route = dst;
2725 dst_copy_metrics(dst1, dst);
2726
2727 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2728 __u32 mark = 0;
2729 int oif;
2730
2731 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2732 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2733
2734 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2735 family = xfrm[i]->props.family;
2736
2737 oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2738 dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2739 &saddr, &daddr, family, mark);
2740 err = PTR_ERR(dst);
2741 if (IS_ERR(dst))
2742 goto put_states;
2743 } else
2744 dst_hold(dst);
2745
2746 dst1->xfrm = xfrm[i];
2747 xdst->xfrm_genid = xfrm[i]->genid;
2748
2749 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2750 dst1->lastuse = now;
2751
2752 dst1->input = dst_discard;
2753
2754 rcu_read_lock();
2755 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2756 if (likely(afinfo))
2757 dst1->output = afinfo->output;
2758 else
2759 dst1->output = dst_discard_out;
2760 rcu_read_unlock();
2761
2762 xdst_prev = xdst;
2763
2764 header_len += xfrm[i]->props.header_len;
2765 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2766 nfheader_len += xfrm[i]->props.header_len;
2767 trailer_len += xfrm[i]->props.trailer_len;
2768 }
2769
2770 xfrm_dst_set_child(xdst_prev, dst);
2771 xdst0->path = dst;
2772
2773 err = -ENODEV;
2774 dev = dst->dev;
2775 if (!dev)
2776 goto free_dst;
2777
2778 xfrm_init_path(xdst0, dst, nfheader_len);
2779 xfrm_init_pmtu(bundle, nx);
2780
2781 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2782 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2783 err = xfrm_fill_dst(xdst_prev, dev, fl);
2784 if (err)
2785 goto free_dst;
2786
2787 xdst_prev->u.dst.header_len = header_len;
2788 xdst_prev->u.dst.trailer_len = trailer_len;
2789 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2790 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2791 }
2792
2793 return &xdst0->u.dst;
2794
2795 put_states:
2796 for (; i < nx; i++)
2797 xfrm_state_put(xfrm[i]);
2798 free_dst:
2799 if (xdst0)
2800 dst_release_immediate(&xdst0->u.dst);
2801
2802 return ERR_PTR(err);
2803 }
2804
2805 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2806 struct xfrm_policy **pols,
2807 int *num_pols, int *num_xfrms)
2808 {
2809 int i;
2810
2811 if (*num_pols == 0 || !pols[0]) {
2812 *num_pols = 0;
2813 *num_xfrms = 0;
2814 return 0;
2815 }
2816 if (IS_ERR(pols[0])) {
2817 *num_pols = 0;
2818 return PTR_ERR(pols[0]);
2819 }
2820
2821 *num_xfrms = pols[0]->xfrm_nr;
2822
2823 #ifdef CONFIG_XFRM_SUB_POLICY
2824 if (pols[0]->action == XFRM_POLICY_ALLOW &&
2825 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2826 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2827 XFRM_POLICY_TYPE_MAIN,
2828 fl, family,
2829 XFRM_POLICY_OUT,
2830 pols[0]->if_id);
2831 if (pols[1]) {
2832 if (IS_ERR(pols[1])) {
2833 xfrm_pols_put(pols, *num_pols);
2834 *num_pols = 0;
2835 return PTR_ERR(pols[1]);
2836 }
2837 (*num_pols)++;
2838 (*num_xfrms) += pols[1]->xfrm_nr;
2839 }
2840 }
2841 #endif
2842 for (i = 0; i < *num_pols; i++) {
2843 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2844 *num_xfrms = -1;
2845 break;
2846 }
2847 }
2848
2849 return 0;
2850
2851 }
2852
2853 static struct xfrm_dst *
2854 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2855 const struct flowi *fl, u16 family,
2856 struct dst_entry *dst_orig)
2857 {
2858 struct net *net = xp_net(pols[0]);
2859 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2860 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2861 struct xfrm_dst *xdst;
2862 struct dst_entry *dst;
2863 int err;
2864
2865 /* Try to instantiate a bundle */
2866 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2867 if (err <= 0) {
2868 if (err == 0)
2869 return NULL;
2870
2871 if (err != -EAGAIN)
2872 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2873 return ERR_PTR(err);
2874 }
2875
2876 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2877 if (IS_ERR(dst)) {
2878 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2879 return ERR_CAST(dst);
2880 }
2881
2882 xdst = (struct xfrm_dst *)dst;
2883 xdst->num_xfrms = err;
2884 xdst->num_pols = num_pols;
2885 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2886 xdst->policy_genid = atomic_read(&pols[0]->genid);
2887
2888 return xdst;
2889 }
2890
2891 static void xfrm_policy_queue_process(struct timer_list *t)
2892 {
2893 struct sk_buff *skb;
2894 struct sock *sk;
2895 struct dst_entry *dst;
2896 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2897 struct net *net = xp_net(pol);
2898 struct xfrm_policy_queue *pq = &pol->polq;
2899 struct flowi fl;
2900 struct sk_buff_head list;
2901 __u32 skb_mark;
2902
2903 spin_lock(&pq->hold_queue.lock);
2904 skb = skb_peek(&pq->hold_queue);
2905 if (!skb) {
2906 spin_unlock(&pq->hold_queue.lock);
2907 goto out;
2908 }
2909 dst = skb_dst(skb);
2910 sk = skb->sk;
2911
2912 /* Fixup the mark to support VTI. */
2913 skb_mark = skb->mark;
2914 skb->mark = pol->mark.v;
2915 xfrm_decode_session(net, skb, &fl, dst->ops->family);
2916 skb->mark = skb_mark;
2917 spin_unlock(&pq->hold_queue.lock);
2918
2919 dst_hold(xfrm_dst_path(dst));
2920 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2921 if (IS_ERR(dst))
2922 goto purge_queue;
2923
2924 if (dst->flags & DST_XFRM_QUEUE) {
2925 dst_release(dst);
2926
2927 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2928 goto purge_queue;
2929
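/* Still no usable route: back off exponentially and re-arm the
 * hold timer.
 */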
2930 pq->timeout = pq->timeout << 1;
2931 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2932 xfrm_pol_hold(pol);
2933 goto out;
2934 }
2935
2936 dst_release(dst);
2937
2938 __skb_queue_head_init(&list);
2939
2940 spin_lock(&pq->hold_queue.lock);
2941 pq->timeout = 0;
2942 skb_queue_splice_init(&pq->hold_queue, &list);
2943 spin_unlock(&pq->hold_queue.lock);
2944
2945 while (!skb_queue_empty(&list)) {
2946 skb = __skb_dequeue(&list);
2947
2948 /* Fixup the mark to support VTI. */
2949 skb_mark = skb->mark;
2950 skb->mark = pol->mark.v;
2951 xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2952 skb->mark = skb_mark;
2953
2954 dst_hold(xfrm_dst_path(skb_dst(skb)));
2955 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2956 if (IS_ERR(dst)) {
2957 kfree_skb(skb);
2958 continue;
2959 }
2960
2961 nf_reset_ct(skb);
2962 skb_dst_drop(skb);
2963 skb_dst_set(skb, dst);
2964
2965 dst_output(net, skb->sk, skb);
2966 }
2967
2968 out:
2969 xfrm_pol_put(pol);
2970 return;
2971
2972 purge_queue:
2973 pq->timeout = 0;
2974 skb_queue_purge(&pq->hold_queue);
2975 xfrm_pol_put(pol);
2976 }
2977
2978 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2979 {
2980 unsigned long sched_next;
2981 struct dst_entry *dst = skb_dst(skb);
2982 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2983 struct xfrm_policy *pol = xdst->pols[0];
2984 struct xfrm_policy_queue *pq = &pol->polq;
2985
2986 if (unlikely(skb_fclone_busy(sk, skb))) {
2987 kfree_skb(skb);
2988 return 0;
2989 }
2990
2991 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2992 kfree_skb(skb);
2993 return -EAGAIN;
2994 }
2995
2996 skb_dst_force(skb);
2997
2998 spin_lock_bh(&pq->hold_queue.lock);
2999
3000 if (!pq->timeout)
3001 pq->timeout = XFRM_QUEUE_TMO_MIN;
3002
3003 sched_next = jiffies + pq->timeout;
3004
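/* If a timer was already pending, keep its earlier expiry and drop
 * the policy reference taken when it was armed; re-arming below
 * takes a fresh reference.
 */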
3005 if (del_timer(&pq->hold_timer)) {
3006 if (time_before(pq->hold_timer.expires, sched_next))
3007 sched_next = pq->hold_timer.expires;
3008 xfrm_pol_put(pol);
3009 }
3010
3011 __skb_queue_tail(&pq->hold_queue, skb);
3012 if (!mod_timer(&pq->hold_timer, sched_next))
3013 xfrm_pol_hold(pol);
3014
3015 spin_unlock_bh(&pq->hold_queue.lock);
3016
3017 return 0;
3018 }
3019
3020 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
3021 struct xfrm_flo *xflo,
3022 const struct flowi *fl,
3023 int num_xfrms,
3024 u16 family)
3025 {
3026 int err;
3027 struct net_device *dev;
3028 struct dst_entry *dst;
3029 struct dst_entry *dst1;
3030 struct xfrm_dst *xdst;
3031
3032 xdst = xfrm_alloc_dst(net, family);
3033 if (IS_ERR(xdst))
3034 return xdst;
3035
3036 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3037 net->xfrm.sysctl_larval_drop ||
3038 num_xfrms <= 0)
3039 return xdst;
3040
3041 dst = xflo->dst_orig;
3042 dst1 = &xdst->u.dst;
3043 dst_hold(dst);
3044 xdst->route = dst;
3045
3046 dst_copy_metrics(dst1, dst);
3047
3048 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3049 dst1->flags |= DST_XFRM_QUEUE;
3050 dst1->lastuse = jiffies;
3051
3052 dst1->input = dst_discard;
3053 dst1->output = xdst_queue_output;
3054
3055 dst_hold(dst);
3056 xfrm_dst_set_child(xdst, dst);
3057 xdst->path = dst;
3058
3059 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3060
3061 err = -ENODEV;
3062 dev = dst->dev;
3063 if (!dev)
3064 goto free_dst;
3065
3066 err = xfrm_fill_dst(xdst, dev, fl);
3067 if (err)
3068 goto free_dst;
3069
3070 out:
3071 return xdst;
3072
3073 free_dst:
3074 dst_release(dst1);
3075 xdst = ERR_PTR(err);
3076 goto out;
3077 }
3078
3079 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3080 const struct flowi *fl,
3081 u16 family, u8 dir,
3082 struct xfrm_flo *xflo, u32 if_id)
3083 {
3084 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3085 int num_pols = 0, num_xfrms = 0, err;
3086 struct xfrm_dst *xdst;
3087
3088 /* Resolve policies to use if we couldn't get them from
3089 * previous cache entry */
3090 num_pols = 1;
3091 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3092 err = xfrm_expand_policies(fl, family, pols,
3093 &num_pols, &num_xfrms);
3094 if (err < 0)
3095 goto inc_error;
3096 if (num_pols == 0)
3097 return NULL;
3098 if (num_xfrms <= 0)
3099 goto make_dummy_bundle;
3100
3101 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3102 xflo->dst_orig);
3103 if (IS_ERR(xdst)) {
3104 err = PTR_ERR(xdst);
3105 if (err == -EREMOTE) {
3106 xfrm_pols_put(pols, num_pols);
3107 return NULL;
3108 }
3109
3110 if (err != -EAGAIN)
3111 goto error;
3112 goto make_dummy_bundle;
3113 } else if (xdst == NULL) {
3114 num_xfrms = 0;
3115 goto make_dummy_bundle;
3116 }
3117
3118 return xdst;
3119
3120 make_dummy_bundle:
3121 /* We found policies, but there are no bundles to instantiate:
3122 * either because the policy blocks, has no transformations, or
3123 * we could not build a template (no xfrm_states). */
3124 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3125 if (IS_ERR(xdst)) {
3126 xfrm_pols_put(pols, num_pols);
3127 return ERR_CAST(xdst);
3128 }
3129 xdst->num_pols = num_pols;
3130 xdst->num_xfrms = num_xfrms;
3131 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3132
3133 return xdst;
3134
3135 inc_error:
3136 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3137 error:
3138 xfrm_pols_put(pols, num_pols);
3139 return ERR_PTR(err);
3140 }
3141
3142 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3143 struct dst_entry *dst_orig)
3144 {
3145 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3146 struct dst_entry *ret;
3147
3148 if (!afinfo) {
3149 dst_release(dst_orig);
3150 return ERR_PTR(-EINVAL);
3151 } else {
3152 ret = afinfo->blackhole_route(net, dst_orig);
3153 }
3154 rcu_read_unlock();
3155
3156 return ret;
3157 }
3158
3159 /* Finds/creates a bundle for given flow and if_id
3160 *
3161 * At the moment we eat a raw IP route. Mostly to speed up lookups
3162 * on interfaces with disabled IPsec.
3163 *
3164 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3165 * compatibility
3166 */
3167 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3168 struct dst_entry *dst_orig,
3169 const struct flowi *fl,
3170 const struct sock *sk,
3171 int flags, u32 if_id)
3172 {
3173 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3174 struct xfrm_dst *xdst;
3175 struct dst_entry *dst, *route;
3176 u16 family = dst_orig->ops->family;
3177 u8 dir = XFRM_POLICY_OUT;
3178 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3179
3180 dst = NULL;
3181 xdst = NULL;
3182 route = NULL;
3183
3184 sk = sk_const_to_full_sk(sk);
3185 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3186 num_pols = 1;
3187 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3188 if_id);
3189 err = xfrm_expand_policies(fl, family, pols,
3190 &num_pols, &num_xfrms);
3191 if (err < 0)
3192 goto dropdst;
3193
3194 if (num_pols) {
3195 if (num_xfrms <= 0) {
3196 drop_pols = num_pols;
3197 goto no_transform;
3198 }
3199
3200 xdst = xfrm_resolve_and_create_bundle(
3201 pols, num_pols, fl,
3202 family, dst_orig);
3203
3204 if (IS_ERR(xdst)) {
3205 xfrm_pols_put(pols, num_pols);
3206 err = PTR_ERR(xdst);
3207 if (err == -EREMOTE)
3208 goto nopol;
3209
3210 goto dropdst;
3211 } else if (xdst == NULL) {
3212 num_xfrms = 0;
3213 drop_pols = num_pols;
3214 goto no_transform;
3215 }
3216
3217 route = xdst->route;
3218 }
3219 }
3220
3221 if (xdst == NULL) {
3222 struct xfrm_flo xflo;
3223
3224 xflo.dst_orig = dst_orig;
3225 xflo.flags = flags;
3226
3227 /* To accelerate a bit... */
3228 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3229 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3230 goto nopol;
3231
3232 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3233 if (xdst == NULL)
3234 goto nopol;
3235 if (IS_ERR(xdst)) {
3236 err = PTR_ERR(xdst);
3237 goto dropdst;
3238 }
3239
3240 num_pols = xdst->num_pols;
3241 num_xfrms = xdst->num_xfrms;
3242 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3243 route = xdst->route;
3244 }
3245
3246 dst = &xdst->u.dst;
3247 if (route == NULL && num_xfrms > 0) {
3248 /* The only case in which xfrm_bundle_lookup() returns a
3249 * bundle with a null route is when the template could
3250 * not be resolved. That means the policies are there, but
3251 * the bundle could not be created, since we don't yet
3252 * have the xfrm_states. We need to wait for the KM to
3253 * negotiate new SAs or bail out with an error. */
3254 if (net->xfrm.sysctl_larval_drop) {
3255 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3256 err = -EREMOTE;
3257 goto error;
3258 }
3259
3260 err = -EAGAIN;
3261
3262 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3263 goto error;
3264 }
3265
3266 no_transform:
3267 if (num_pols == 0)
3268 goto nopol;
3269
3270 if ((flags & XFRM_LOOKUP_ICMP) &&
3271 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3272 err = -ENOENT;
3273 goto error;
3274 }
3275
3276 for (i = 0; i < num_pols; i++)
3277 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3278
3279 if (num_xfrms < 0) {
3280 /* Prohibit the flow */
3281 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3282 err = -EPERM;
3283 goto error;
3284 } else if (num_xfrms > 0) {
3285 /* Flow transformed */
3286 dst_release(dst_orig);
3287 } else {
3288 /* Flow passes untransformed */
3289 dst_release(dst);
3290 dst = dst_orig;
3291 }
3292
3293 ok:
3294 xfrm_pols_put(pols, drop_pols);
3295 if (dst && dst->xfrm &&
3296 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3297 dst->flags |= DST_XFRM_TUNNEL;
3298 return dst;
3299
3300 nopol:
3301 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3302 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3303 err = -EPERM;
3304 goto error;
3305 }
3306 if (!(flags & XFRM_LOOKUP_ICMP)) {
3307 dst = dst_orig;
3308 goto ok;
3309 }
3310 err = -ENOENT;
3311 error:
3312 dst_release(dst);
3313 dropdst:
3314 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3315 dst_release(dst_orig);
3316 xfrm_pols_put(pols, drop_pols);
3317 return ERR_PTR(err);
3318 }
3319 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3320
3321 /* Main function: finds/creates a bundle for given flow.
3322 *
3323 * At the moment we eat a raw IP route. Mostly to speed up lookups
3324 * on interfaces with disabled IPsec.
3325 */
3326 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3327 const struct flowi *fl, const struct sock *sk,
3328 int flags)
3329 {
3330 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3331 }
3332 EXPORT_SYMBOL(xfrm_lookup);
3333
3334 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3335 * Otherwise we may send out blackholed packets.
3336 */
3337 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3338 const struct flowi *fl,
3339 const struct sock *sk, int flags)
3340 {
3341 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3342 flags | XFRM_LOOKUP_QUEUE |
3343 XFRM_LOOKUP_KEEP_DST_REF);
3344
3345 if (PTR_ERR(dst) == -EREMOTE)
3346 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3347
3348 if (IS_ERR(dst))
3349 dst_release(dst_orig);
3350
3351 return dst;
3352 }
3353 EXPORT_SYMBOL(xfrm_lookup_route);
3354
3355 static inline int
3356 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3357 {
3358 struct sec_path *sp = skb_sec_path(skb);
3359 struct xfrm_state *x;
3360
3361 if (!sp || idx < 0 || idx >= sp->len)
3362 return 0;
3363 x = sp->xvec[idx];
3364 if (!x->type->reject)
3365 return 0;
3366 return x->type->reject(x, skb, fl);
3367 }
3368
3369 /* When skb is transformed back to its "native" form, we have to
3370 * check policy restrictions. At the moment we make this in maximally
3371 * stupid way. Shame on me. :-) Of course, connected sockets must
3372 * have policy cached at them.
3373 */
3374
3375 static inline int
3376 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3377 unsigned short family, u32 if_id)
3378 {
3379 if (xfrm_state_kern(x))
3380 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3381 return x->id.proto == tmpl->id.proto &&
3382 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3383 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3384 x->props.mode == tmpl->mode &&
3385 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3386 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3387 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3388 xfrm_state_addr_cmp(tmpl, x, family)) &&
3389 (if_id == 0 || if_id == x->if_id);
3390 }
3391
3392 /*
3393 * 0 or a positive value is returned when validation succeeds (either a bypass
3394 * because of an optional transport-mode template, or the next index of the
3395 * secpath state matched against the template).
3396 * -1 is returned when no matching template is found.
3397 * Otherwise "-2 - errored_index" is returned.
3398 */
3399 static inline int
3400 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3401 unsigned short family, u32 if_id)
3402 {
3403 int idx = start;
3404
3405 if (tmpl->optional) {
3406 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3407 return start;
3408 } else
3409 start = -1;
3410 for (; idx < sp->len; idx++) {
3411 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3412 return ++idx;
3413 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3414 if (idx < sp->verified_cnt) {
3415 /* Secpath entry previously verified, consider optional and
3416 * continue searching
3417 */
3418 continue;
3419 }
3420
3421 if (start == -1)
3422 start = -2-idx;
3423 break;
3424 }
3425 }
3426 return start;
3427 }
3428
3429 static void
3430 decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3431 {
3432 struct flowi4 *fl4 = &fl->u.ip4;
3433
3434 memset(fl4, 0, sizeof(struct flowi4));
3435
3436 if (reverse) {
3437 fl4->saddr = flkeys->addrs.ipv4.dst;
3438 fl4->daddr = flkeys->addrs.ipv4.src;
3439 fl4->fl4_sport = flkeys->ports.dst;
3440 fl4->fl4_dport = flkeys->ports.src;
3441 } else {
3442 fl4->saddr = flkeys->addrs.ipv4.src;
3443 fl4->daddr = flkeys->addrs.ipv4.dst;
3444 fl4->fl4_sport = flkeys->ports.src;
3445 fl4->fl4_dport = flkeys->ports.dst;
3446 }
3447
3448 switch (flkeys->basic.ip_proto) {
3449 case IPPROTO_GRE:
3450 fl4->fl4_gre_key = flkeys->gre.keyid;
3451 break;
3452 case IPPROTO_ICMP:
3453 fl4->fl4_icmp_type = flkeys->icmp.type;
3454 fl4->fl4_icmp_code = flkeys->icmp.code;
3455 break;
3456 }
3457
3458 fl4->flowi4_proto = flkeys->basic.ip_proto;
3459 fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3460 }
3461
3462 #if IS_ENABLED(CONFIG_IPV6)
3463 static void
3464 decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3465 {
3466 struct flowi6 *fl6 = &fl->u.ip6;
3467
3468 memset(fl6, 0, sizeof(struct flowi6));
3469
3470 if (reverse) {
3471 fl6->saddr = flkeys->addrs.ipv6.dst;
3472 fl6->daddr = flkeys->addrs.ipv6.src;
3473 fl6->fl6_sport = flkeys->ports.dst;
3474 fl6->fl6_dport = flkeys->ports.src;
3475 } else {
3476 fl6->saddr = flkeys->addrs.ipv6.src;
3477 fl6->daddr = flkeys->addrs.ipv6.dst;
3478 fl6->fl6_sport = flkeys->ports.src;
3479 fl6->fl6_dport = flkeys->ports.dst;
3480 }
3481
3482 switch (flkeys->basic.ip_proto) {
3483 case IPPROTO_GRE:
3484 fl6->fl6_gre_key = flkeys->gre.keyid;
3485 break;
3486 case IPPROTO_ICMPV6:
3487 fl6->fl6_icmp_type = flkeys->icmp.type;
3488 fl6->fl6_icmp_code = flkeys->icmp.code;
3489 break;
3490 }
3491
3492 fl6->flowi6_proto = flkeys->basic.ip_proto;
3493 }
3494 #endif
3495
3496 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3497 unsigned int family, int reverse)
3498 {
3499 struct xfrm_flow_keys flkeys;
3500
3501 memset(&flkeys, 0, sizeof(flkeys));
3502 __skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3503 NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3504
3505 switch (family) {
3506 case AF_INET:
3507 decode_session4(&flkeys, fl, reverse);
3508 break;
3509 #if IS_ENABLED(CONFIG_IPV6)
3510 case AF_INET6:
3511 decode_session6(&flkeys, fl, reverse);
3512 break;
3513 #endif
3514 default:
3515 return -EAFNOSUPPORT;
3516 }
3517
3518 fl->flowi_mark = skb->mark;
3519 if (reverse) {
3520 fl->flowi_oif = skb->skb_iif;
3521 } else {
3522 int oif = 0;
3523
3524 if (skb_dst(skb) && skb_dst(skb)->dev)
3525 oif = skb_dst(skb)->dev->ifindex;
3526
3527 fl->flowi_oif = oif;
3528 }
3529
3530 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3531 }
3532 EXPORT_SYMBOL(__xfrm_decode_session);
3533
3534 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3535 {
3536 for (; k < sp->len; k++) {
3537 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3538 *idxp = k;
3539 return 1;
3540 }
3541 }
3542
3543 return 0;
3544 }
3545
3546 static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3547 {
3548 const struct flowi4 *fl4 = &fl->u.ip4;
3549
3550 if (family == AF_INET &&
3551 fl4->flowi4_proto == IPPROTO_ICMP &&
3552 (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3553 fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3554 return true;
3555
3556 #if IS_ENABLED(CONFIG_IPV6)
3557 if (family == AF_INET6) {
3558 const struct flowi6 *fl6 = &fl->u.ip6;
3559
3560 if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3561 (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3562 fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3563 fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3564 return true;
3565 }
3566 #endif
3567 return false;
3568 }
3569
3570 static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3571 const struct flowi *fl, struct flowi *fl1)
3572 {
3573 bool ret = true;
3574 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3575 int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
3576 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3577
3578 if (!newskb)
3579 return true;
3580
3581 if (!pskb_pull(newskb, hl))
3582 goto out;
3583
3584 skb_reset_network_header(newskb);
3585
3586 if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3587 goto out;
3588
3589 fl1->flowi_oif = fl->flowi_oif;
3590 fl1->flowi_mark = fl->flowi_mark;
3591 fl1->flowi_tos = fl->flowi_tos;
3592 nf_nat_decode_session(newskb, fl1, family);
3593 ret = false;
3594
3595 out:
3596 consume_skb(newskb);
3597 return ret;
3598 }
3599
3600 static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3601 const struct xfrm_selector *sel,
3602 const struct flowi *fl)
3603 {
3604 bool ret = false;
3605
3606 if (icmp_err_packet(fl, family)) {
3607 struct flowi fl1;
3608
3609 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3610 return ret;
3611
3612 ret = xfrm_selector_match(sel, &fl1, family);
3613 }
3614
3615 return ret;
3616 }
3617
3618 static inline struct
3619 xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
3620 const struct flowi *fl, unsigned short family,
3621 u32 if_id)
3622 {
3623 struct xfrm_policy *pol = NULL;
3624
3625 if (icmp_err_packet(fl, family)) {
3626 struct flowi fl1;
3627 struct net *net = dev_net(skb->dev);
3628
3629 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3630 return pol;
3631
3632 pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3633 if (IS_ERR(pol))
3634 pol = NULL;
3635 }
3636
3637 return pol;
3638 }
3639
3640 static inline struct
3641 dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3642 unsigned short family, struct dst_entry *dst)
3643 {
3644 if (icmp_err_packet(fl, family)) {
3645 struct net *net = dev_net(skb->dev);
3646 struct dst_entry *dst2;
3647 struct flowi fl1;
3648
3649 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3650 return dst;
3651
3652 dst_hold(dst);
3653
3654 dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3655
3656 if (IS_ERR(dst2))
3657 return dst;
3658
3659 if (dst2->xfrm) {
3660 dst_release(dst);
3661 dst = dst2;
3662 } else {
3663 dst_release(dst2);
3664 }
3665 }
3666
3667 return dst;
3668 }
3669
3670 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3671 unsigned short family)
3672 {
3673 struct net *net = dev_net(skb->dev);
3674 struct xfrm_policy *pol;
3675 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3676 int npols = 0;
3677 int xfrm_nr;
3678 int pi;
3679 int reverse;
3680 struct flowi fl;
3681 int xerr_idx = -1;
3682 const struct xfrm_if_cb *ifcb;
3683 struct sec_path *sp;
3684 u32 if_id = 0;
3685
3686 rcu_read_lock();
3687 ifcb = xfrm_if_get_cb();
3688
3689 if (ifcb) {
3690 struct xfrm_if_decode_session_result r;
3691
3692 if (ifcb->decode_session(skb, family, &r)) {
3693 if_id = r.if_id;
3694 net = r.net;
3695 }
3696 }
3697 rcu_read_unlock();
3698
3699 reverse = dir & ~XFRM_POLICY_MASK;
3700 dir &= XFRM_POLICY_MASK;
3701
3702 if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3703 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3704 return 0;
3705 }
3706
3707 nf_nat_decode_session(skb, &fl, family);
3708
3709 /* First, check used SA against their selectors. */
3710 sp = skb_sec_path(skb);
3711 if (sp) {
3712 int i;
3713
3714 for (i = sp->len - 1; i >= 0; i--) {
3715 struct xfrm_state *x = sp->xvec[i];
3716 int ret = 0;
3717
3718 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3719 ret = 1;
3720 if (x->props.flags & XFRM_STATE_ICMP &&
3721 xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3722 ret = 0;
3723 if (ret) {
3724 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3725 return 0;
3726 }
3727 }
3728 }
3729 }
3730
3731 pol = NULL;
3732 sk = sk_to_full_sk(sk);
3733 if (sk && sk->sk_policy[dir]) {
3734 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3735 if (IS_ERR(pol)) {
3736 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3737 return 0;
3738 }
3739 }
3740
3741 if (!pol)
3742 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3743
3744 if (IS_ERR(pol)) {
3745 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3746 return 0;
3747 }
3748
3749 if (!pol && dir == XFRM_POLICY_FWD)
3750 pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3751
3752 if (!pol) {
3753 const bool is_crypto_offload = sp &&
3754 (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
3755
3756 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3757 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3758 return 0;
3759 }
3760
3761 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
3762 xfrm_secpath_reject(xerr_idx, skb, &fl);
3763 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3764 return 0;
3765 }
3766 return 1;
3767 }
3768
3769 /* This lockless write can happen from different cpus. */
3770 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3771
3772 pols[0] = pol;
3773 npols++;
3774 #ifdef CONFIG_XFRM_SUB_POLICY
3775 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3776 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3777 &fl, family,
3778 XFRM_POLICY_IN, if_id);
3779 if (pols[1]) {
3780 if (IS_ERR(pols[1])) {
3781 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3782 xfrm_pol_put(pols[0]);
3783 return 0;
3784 }
3785 /* This write can happen from different cpus. */
3786 WRITE_ONCE(pols[1]->curlft.use_time,
3787 ktime_get_real_seconds());
3788 npols++;
3789 }
3790 }
3791 #endif
3792
3793 if (pol->action == XFRM_POLICY_ALLOW) {
3794 static struct sec_path dummy;
3795 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3796 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3797 struct xfrm_tmpl **tpp = tp;
3798 int ti = 0;
3799 int i, k;
3800
3801 sp = skb_sec_path(skb);
3802 if (!sp)
3803 sp = &dummy;
3804
3805 for (pi = 0; pi < npols; pi++) {
3806 if (pols[pi] != pol &&
3807 pols[pi]->action != XFRM_POLICY_ALLOW) {
3808 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3809 goto reject;
3810 }
3811 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3812 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3813 goto reject_error;
3814 }
3815 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3816 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3817 }
3818 xfrm_nr = ti;
3819
3820 if (npols > 1) {
3821 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3822 tpp = stp;
3823 }
3824
3825 /* For each tunnel xfrm, find the first matching tmpl.
3826 * For each tmpl before that, find corresponding xfrm.
3827 * Order is _important_. Later we will implement
3828 * some barriers, but at the moment barriers
3829 * are implied between each two transformations.
3830 * Upon success, marks secpath entries as having been
3831 * verified to allow them to be skipped in future policy
3832 * checks (e.g. nested tunnels).
3833 */
3834 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3835 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3836 if (k < 0) {
3837 if (k < -1)
3838 /* "-2 - errored_index" returned */
3839 xerr_idx = -(2+k);
3840 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3841 goto reject;
3842 }
3843 }
3844
3845 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3846 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3847 goto reject;
3848 }
3849
3850 xfrm_pols_put(pols, npols);
3851 sp->verified_cnt = k;
3852
3853 return 1;
3854 }
3855 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3856
3857 reject:
3858 xfrm_secpath_reject(xerr_idx, skb, &fl);
3859 reject_error:
3860 xfrm_pols_put(pols, npols);
3861 return 0;
3862 }
3863 EXPORT_SYMBOL(__xfrm_policy_check);
3864
3865 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3866 {
3867 struct net *net = dev_net(skb->dev);
3868 struct flowi fl;
3869 struct dst_entry *dst;
3870 int res = 1;
3871
3872 if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3873 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3874 return 0;
3875 }
3876
3877 skb_dst_force(skb);
3878 if (!skb_dst(skb)) {
3879 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3880 return 0;
3881 }
3882
3883 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3884 if (IS_ERR(dst)) {
3885 res = 0;
3886 dst = NULL;
3887 }
3888
3889 if (dst && !dst->xfrm)
3890 dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3891
3892 skb_dst_set(skb, dst);
3893 return res;
3894 }
3895 EXPORT_SYMBOL(__xfrm_route_forward);
3896
3897 /* Optimize later using cookies and generation ids. */
3898
3899 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3900 {
3901 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3902 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3903 * get validated by dst_ops->check on every use. We do this
3904 * because when a normal route referenced by an XFRM dst is
3905 * obsoleted we do not go looking around for all parent
3906 * referencing XFRM dsts so that we can invalidate them. It
3907 * is just too much work. Instead we make the checks here on
3908 * every use. For example:
3909 *
3910 * XFRM dst A --> IPv4 dst X
3911 *
3912 * X is the "xdst->route" of A (X is also the "dst->path" of A
3913 * in this example). If X is marked obsolete, "A" will not
3914 * notice. That's what we are validating here via the
3915 * stale_bundle() check.
3916 *
3917 * When a dst is removed from the fib tree, it will be marked with
3918 * DST_OBSOLETE_DEAD.
3919 * This will force stale_bundle() to fail on any xdst bundle with
3920 * this dst linked in it.
3921 */
3922 if (dst->obsolete < 0 && !stale_bundle(dst))
3923 return dst;
3924
3925 return NULL;
3926 }
3927
3928 static int stale_bundle(struct dst_entry *dst)
3929 {
3930 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3931 }
3932
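/* When a device goes down, re-point any bundle members that still
 * reference it at blackhole_netdev so the bundle can be torn down
 * safely; the reference on the departing device is dropped once the
 * switch is made.
 */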
3933 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3934 {
3935 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3936 dst->dev = blackhole_netdev;
3937 dev_hold(dst->dev);
3938 dev_put(dev);
3939 }
3940 }
3941 EXPORT_SYMBOL(xfrm_dst_ifdown);
3942
3943 static void xfrm_link_failure(struct sk_buff *skb)
3944 {
3945 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3946 }
3947
3948 static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3949 {
3950 if (dst->obsolete)
3951 sk_dst_reset(sk);
3952 }
3953
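/* For every member of a freshly built bundle, cache the MTU of its
 * child and of the route it was built on, then set the member's MTU
 * metric to the smaller of the state-adjusted child MTU and the route
 * MTU.
 */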
3954 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3955 {
3956 while (nr--) {
3957 struct xfrm_dst *xdst = bundle[nr];
3958 u32 pmtu, route_mtu_cached;
3959 struct dst_entry *dst;
3960
3961 dst = &xdst->u.dst;
3962 pmtu = dst_mtu(xfrm_dst_child(dst));
3963 xdst->child_mtu_cached = pmtu;
3964
3965 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3966
3967 route_mtu_cached = dst_mtu(xdst->route);
3968 xdst->route_mtu_cached = route_mtu_cached;
3969
3970 if (pmtu > route_mtu_cached)
3971 pmtu = route_mtu_cached;
3972
3973 dst_metric_set(dst, RTAX_MTU, pmtu);
3974 }
3975 }
3976
3977 /* Check that the bundle accepts the flow and its components are
3978 * still valid.
3979 */
3980
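/* Pass 1 walks the bundle and rejects it if any state is no longer
 * valid, any genid is stale, or an underlying route fails dst_check();
 * it also notes the last level at which a cached MTU changed.  Pass 2
 * then recomputes the MTU metric for the affected members, mirroring
 * xfrm_init_pmtu().
 */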
3981 static int xfrm_bundle_ok(struct xfrm_dst *first)
3982 {
3983 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3984 struct dst_entry *dst = &first->u.dst;
3985 struct xfrm_dst *xdst;
3986 int start_from, nr;
3987 u32 mtu;
3988
3989 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3990 (dst->dev && !netif_running(dst->dev)))
3991 return 0;
3992
3993 if (dst->flags & DST_XFRM_QUEUE)
3994 return 1;
3995
3996 start_from = nr = 0;
3997 do {
3998 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3999
4000 if (dst->xfrm->km.state != XFRM_STATE_VALID)
4001 return 0;
4002 if (xdst->xfrm_genid != dst->xfrm->genid)
4003 return 0;
4004 if (xdst->num_pols > 0 &&
4005 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
4006 return 0;
4007
4008 bundle[nr++] = xdst;
4009
4010 mtu = dst_mtu(xfrm_dst_child(dst));
4011 if (xdst->child_mtu_cached != mtu) {
4012 start_from = nr;
4013 xdst->child_mtu_cached = mtu;
4014 }
4015
4016 if (!dst_check(xdst->route, xdst->route_cookie))
4017 return 0;
4018 mtu = dst_mtu(xdst->route);
4019 if (xdst->route_mtu_cached != mtu) {
4020 start_from = nr;
4021 xdst->route_mtu_cached = mtu;
4022 }
4023
4024 dst = xfrm_dst_child(dst);
4025 } while (dst->xfrm);
4026
4027 if (likely(!start_from))
4028 return 1;
4029
4030 xdst = bundle[start_from - 1];
4031 mtu = xdst->child_mtu_cached;
4032 while (start_from--) {
4033 dst = &xdst->u.dst;
4034
4035 mtu = xfrm_state_mtu(dst->xfrm, mtu);
4036 if (mtu > xdst->route_mtu_cached)
4037 mtu = xdst->route_mtu_cached;
4038 dst_metric_set(dst, RTAX_MTU, mtu);
4039 if (!start_from)
4040 break;
4041
4042 xdst = bundle[start_from - 1];
4043 xdst->child_mtu_cached = mtu;
4044 }
4045
4046 return 1;
4047 }
4048
4049 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4050 {
4051 return dst_metric_advmss(xfrm_dst_path(dst));
4052 }
4053
4054 static unsigned int xfrm_mtu(const struct dst_entry *dst)
4055 {
4056 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4057
4058 return mtu ? : dst_mtu(xfrm_dst_path(dst));
4059 }
4060
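/* Walk the transformation chain to find the address that neighbour
 * resolution should use: transport-mode states are skipped, tunnel-mode
 * states substitute their outer destination (or the remote care-of
 * address when the state type asks for it via XFRM_TYPE_REMOTE_COADDR).
 */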
4061 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4062 const void *daddr)
4063 {
4064 while (dst->xfrm) {
4065 const struct xfrm_state *xfrm = dst->xfrm;
4066
4067 dst = xfrm_dst_child(dst);
4068
4069 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4070 continue;
4071 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4072 daddr = xfrm->coaddr;
4073 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4074 daddr = &xfrm->id.daddr;
4075 }
4076 return daddr;
4077 }
4078
4079 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4080 struct sk_buff *skb,
4081 const void *daddr)
4082 {
4083 const struct dst_entry *path = xfrm_dst_path(dst);
4084
4085 if (!skb)
4086 daddr = xfrm_get_dst_nexthop(dst, daddr);
4087 return path->ops->neigh_lookup(path, skb, daddr);
4088 }
4089
4090 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4091 {
4092 const struct dst_entry *path = xfrm_dst_path(dst);
4093
4094 daddr = xfrm_get_dst_nexthop(dst, daddr);
4095 path->ops->confirm_neigh(path, daddr);
4096 }
4097
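/* Per-family modules describe themselves with a struct xfrm_policy_afinfo
 * and register it once at init time.  A minimal sketch (names purely
 * illustrative):
 *
 *	static const struct xfrm_policy_afinfo example_afinfo = {
 *		.dst_ops	= &example_dst_ops,
 *		.dst_lookup	= example_dst_lookup,
 *		...
 *	};
 *
 *	err = xfrm_policy_register_afinfo(&example_afinfo, AF_INET);
 *
 * Registration fills only the dst_ops handlers that the caller left
 * NULL with the xfrm defaults below; anything already set is preserved.
 */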
4098 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4099 {
4100 int err = 0;
4101
4102 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4103 return -EAFNOSUPPORT;
4104
4105 spin_lock(&xfrm_policy_afinfo_lock);
4106 if (unlikely(xfrm_policy_afinfo[family] != NULL))
4107 err = -EEXIST;
4108 else {
4109 struct dst_ops *dst_ops = afinfo->dst_ops;
4110 if (likely(dst_ops->kmem_cachep == NULL))
4111 dst_ops->kmem_cachep = xfrm_dst_cache;
4112 if (likely(dst_ops->check == NULL))
4113 dst_ops->check = xfrm_dst_check;
4114 if (likely(dst_ops->default_advmss == NULL))
4115 dst_ops->default_advmss = xfrm_default_advmss;
4116 if (likely(dst_ops->mtu == NULL))
4117 dst_ops->mtu = xfrm_mtu;
4118 if (likely(dst_ops->negative_advice == NULL))
4119 dst_ops->negative_advice = xfrm_negative_advice;
4120 if (likely(dst_ops->link_failure == NULL))
4121 dst_ops->link_failure = xfrm_link_failure;
4122 if (likely(dst_ops->neigh_lookup == NULL))
4123 dst_ops->neigh_lookup = xfrm_neigh_lookup;
4124 if (likely(!dst_ops->confirm_neigh))
4125 dst_ops->confirm_neigh = xfrm_confirm_neigh;
4126 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4127 }
4128 spin_unlock(&xfrm_policy_afinfo_lock);
4129
4130 return err;
4131 }
4132 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
4133
4134 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4135 {
4136 struct dst_ops *dst_ops = afinfo->dst_ops;
4137 int i;
4138
4139 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4140 if (xfrm_policy_afinfo[i] != afinfo)
4141 continue;
4142 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4143 break;
4144 }
4145
4146 synchronize_rcu();
4147
4148 dst_ops->kmem_cachep = NULL;
4149 dst_ops->check = NULL;
4150 dst_ops->negative_advice = NULL;
4151 dst_ops->link_failure = NULL;
4152 }
4153 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4154
4155 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4156 {
4157 spin_lock(&xfrm_if_cb_lock);
4158 rcu_assign_pointer(xfrm_if_cb, ifcb);
4159 spin_unlock(&xfrm_if_cb_lock);
4160 }
4161 EXPORT_SYMBOL(xfrm_if_register_cb);
4162
4163 void xfrm_if_unregister_cb(void)
4164 {
4165 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4166 synchronize_rcu();
4167 }
4168 EXPORT_SYMBOL(xfrm_if_unregister_cb);
4169
4170 #ifdef CONFIG_XFRM_STATISTICS
4171 static int __net_init xfrm_statistics_init(struct net *net)
4172 {
4173 int rv;
4174 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4175 if (!net->mib.xfrm_statistics)
4176 return -ENOMEM;
4177 rv = xfrm_proc_init(net);
4178 if (rv < 0)
4179 free_percpu(net->mib.xfrm_statistics);
4180 return rv;
4181 }
4182
4183 static void xfrm_statistics_fini(struct net *net)
4184 {
4185 xfrm_proc_fini(net);
4186 free_percpu(net->mib.xfrm_statistics);
4187 }
4188 #else
4189 static int __net_init xfrm_statistics_init(struct net *net)
4190 {
4191 return 0;
4192 }
4193
4194 static void xfrm_statistics_fini(struct net *net)
4195 {
4196 }
4197 #endif
4198
4199 static int __net_init xfrm_policy_init(struct net *net)
4200 {
4201 unsigned int hmask, sz;
4202 int dir, err;
4203
4204 if (net_eq(net, &init_net)) {
4205 xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4206 err = rhashtable_init(&xfrm_policy_inexact_table,
4207 &xfrm_pol_inexact_params);
4208 BUG_ON(err);
4209 }
4210
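	/* Start small: an 8-bucket hash per direction.  The resize worker
	 * (policy_hash_work -> xfrm_hash_resize()) can grow the tables
	 * later as policies are inserted.
	 */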
4211 hmask = 8 - 1;
4212 sz = (hmask+1) * sizeof(struct hlist_head);
4213
4214 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4215 if (!net->xfrm.policy_byidx)
4216 goto out_byidx;
4217 net->xfrm.policy_idx_hmask = hmask;
4218
4219 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4220 struct xfrm_policy_hash *htab;
4221
4222 net->xfrm.policy_count[dir] = 0;
4223 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4224
4225 htab = &net->xfrm.policy_bydst[dir];
4226 htab->table = xfrm_hash_alloc(sz);
4227 if (!htab->table)
4228 goto out_bydst;
4229 htab->hmask = hmask;
4230 htab->dbits4 = 32;
4231 htab->sbits4 = 32;
4232 htab->dbits6 = 128;
4233 htab->sbits6 = 128;
4234 }
4235 net->xfrm.policy_hthresh.lbits4 = 32;
4236 net->xfrm.policy_hthresh.rbits4 = 32;
4237 net->xfrm.policy_hthresh.lbits6 = 128;
4238 net->xfrm.policy_hthresh.rbits6 = 128;
4239
4240 seqlock_init(&net->xfrm.policy_hthresh.lock);
4241
4242 INIT_LIST_HEAD(&net->xfrm.policy_all);
4243 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4244 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4245 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4246 return 0;
4247
4248 out_bydst:
4249 for (dir--; dir >= 0; dir--) {
4250 struct xfrm_policy_hash *htab;
4251
4252 htab = &net->xfrm.policy_bydst[dir];
4253 xfrm_hash_free(htab->table, sz);
4254 }
4255 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4256 out_byidx:
4257 return -ENOMEM;
4258 }
4259
4260 static void xfrm_policy_fini(struct net *net)
4261 {
4262 struct xfrm_pol_inexact_bin *b, *t;
4263 unsigned int sz;
4264 int dir;
4265
4266 flush_work(&net->xfrm.policy_hash_work);
4267 #ifdef CONFIG_XFRM_SUB_POLICY
4268 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4269 #endif
4270 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4271
4272 WARN_ON(!list_empty(&net->xfrm.policy_all));
4273
4274 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4275 struct xfrm_policy_hash *htab;
4276
4277 htab = &net->xfrm.policy_bydst[dir];
4278 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4279 WARN_ON(!hlist_empty(htab->table));
4280 xfrm_hash_free(htab->table, sz);
4281 }
4282
4283 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4284 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4285 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4286
4287 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4288 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4289 __xfrm_policy_inexact_prune_bin(b, true);
4290 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4291 }
4292
4293 static int __net_init xfrm_net_init(struct net *net)
4294 {
4295 int rv;
4296
4297 /* Initialize the per-net locks here */
4298 spin_lock_init(&net->xfrm.xfrm_state_lock);
4299 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4300 seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4301 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4302 net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4303 net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4304 net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4305
4306 rv = xfrm_statistics_init(net);
4307 if (rv < 0)
4308 goto out_statistics;
4309 rv = xfrm_state_init(net);
4310 if (rv < 0)
4311 goto out_state;
4312 rv = xfrm_policy_init(net);
4313 if (rv < 0)
4314 goto out_policy;
4315 rv = xfrm_sysctl_init(net);
4316 if (rv < 0)
4317 goto out_sysctl;
4318
4319 rv = xfrm_nat_keepalive_net_init(net);
4320 if (rv < 0)
4321 goto out_nat_keepalive;
4322
4323 return 0;
4324
4325 out_nat_keepalive:
4326 xfrm_sysctl_fini(net);
4327 out_sysctl:
4328 xfrm_policy_fini(net);
4329 out_policy:
4330 xfrm_state_fini(net);
4331 out_state:
4332 xfrm_statistics_fini(net);
4333 out_statistics:
4334 return rv;
4335 }
4336
4337 static void __net_exit xfrm_net_exit(struct net *net)
4338 {
4339 xfrm_nat_keepalive_net_fini(net);
4340 xfrm_sysctl_fini(net);
4341 xfrm_policy_fini(net);
4342 xfrm_state_fini(net);
4343 xfrm_statistics_fini(net);
4344 }
4345
4346 static struct pernet_operations __net_initdata xfrm_net_ops = {
4347 .init = xfrm_net_init,
4348 .exit = xfrm_net_exit,
4349 };
4350
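/* Dissector keys used by xfrm_session_dissector when decoding a packet
 * into struct xfrm_flow_keys for policy lookup; each entry maps a flow
 * dissector key to its slot in that structure.
 */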
4351 static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
4352 {
4353 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
4354 .offset = offsetof(struct xfrm_flow_keys, control),
4355 },
4356 {
4357 .key_id = FLOW_DISSECTOR_KEY_BASIC,
4358 .offset = offsetof(struct xfrm_flow_keys, basic),
4359 },
4360 {
4361 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
4362 .offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
4363 },
4364 {
4365 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
4366 .offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
4367 },
4368 {
4369 .key_id = FLOW_DISSECTOR_KEY_PORTS,
4370 .offset = offsetof(struct xfrm_flow_keys, ports),
4371 },
4372 {
4373 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
4374 .offset = offsetof(struct xfrm_flow_keys, gre),
4375 },
4376 {
4377 .key_id = FLOW_DISSECTOR_KEY_IP,
4378 .offset = offsetof(struct xfrm_flow_keys, ip),
4379 },
4380 {
4381 .key_id = FLOW_DISSECTOR_KEY_ICMP,
4382 .offset = offsetof(struct xfrm_flow_keys, icmp),
4383 },
4384 };
4385
4386 void __init xfrm_init(void)
4387 {
4388 skb_flow_dissector_init(&xfrm_session_dissector,
4389 xfrm_flow_dissector_keys,
4390 ARRAY_SIZE(xfrm_flow_dissector_keys));
4391
4392 register_pernet_subsys(&xfrm_net_ops);
4393 xfrm_dev_init();
4394 xfrm_input_init();
4395
4396 #ifdef CONFIG_XFRM_ESPINTCP
4397 espintcp_init();
4398 #endif
4399
4400 register_xfrm_state_bpf();
4401 xfrm_nat_keepalive_init(AF_INET);
4402 }
4403
4404 #ifdef CONFIG_AUDITSYSCALL
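/* xfrm_audit_common_policyinfo() appends the security context (if any)
 * and the policy selector to an audit record.  For an IPv4 selector such
 * as 192.0.2.0/24 -> 198.51.100.1 the appended text would look roughly
 * like (illustrative values):
 *
 *	" src=192.0.2.0 src_prefixlen=24 dst=198.51.100.1"
 *
 * Host addresses (/32 or /128) omit the corresponding prefixlen field.
 */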
4405 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4406 struct audit_buffer *audit_buf)
4407 {
4408 struct xfrm_sec_ctx *ctx = xp->security;
4409 struct xfrm_selector *sel = &xp->selector;
4410
4411 if (ctx)
4412 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4413 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4414
4415 switch (sel->family) {
4416 case AF_INET:
4417 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4418 if (sel->prefixlen_s != 32)
4419 audit_log_format(audit_buf, " src_prefixlen=%d",
4420 sel->prefixlen_s);
4421 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4422 if (sel->prefixlen_d != 32)
4423 audit_log_format(audit_buf, " dst_prefixlen=%d",
4424 sel->prefixlen_d);
4425 break;
4426 case AF_INET6:
4427 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4428 if (sel->prefixlen_s != 128)
4429 audit_log_format(audit_buf, " src_prefixlen=%d",
4430 sel->prefixlen_s);
4431 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4432 if (sel->prefixlen_d != 128)
4433 audit_log_format(audit_buf, " dst_prefixlen=%d",
4434 sel->prefixlen_d);
4435 break;
4436 }
4437 }
4438
4439 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4440 {
4441 struct audit_buffer *audit_buf;
4442
4443 audit_buf = xfrm_audit_start("SPD-add");
4444 if (audit_buf == NULL)
4445 return;
4446 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4447 audit_log_format(audit_buf, " res=%u", result);
4448 xfrm_audit_common_policyinfo(xp, audit_buf);
4449 audit_log_end(audit_buf);
4450 }
4451 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4452
4453 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4454 bool task_valid)
4455 {
4456 struct audit_buffer *audit_buf;
4457
4458 audit_buf = xfrm_audit_start("SPD-delete");
4459 if (audit_buf == NULL)
4460 return;
4461 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4462 audit_log_format(audit_buf, " res=%u", result);
4463 xfrm_audit_common_policyinfo(xp, audit_buf);
4464 audit_log_end(audit_buf);
4465 }
4466 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4467 #endif
4468
4469 #ifdef CONFIG_XFRM_MIGRATE
4470 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4471 u8 dir, u8 type, struct net *net, u32 if_id)
4472 {
4473 struct xfrm_policy *pol;
4474 struct flowi fl;
4475
4476 memset(&fl, 0, sizeof(fl));
4477
4478 fl.flowi_proto = sel->proto;
4479
4480 switch (sel->family) {
4481 case AF_INET:
4482 fl.u.ip4.saddr = sel->saddr.a4;
4483 fl.u.ip4.daddr = sel->daddr.a4;
4484 if (sel->proto == IPSEC_ULPROTO_ANY)
4485 break;
4486 fl.u.flowi4_oif = sel->ifindex;
4487 fl.u.ip4.fl4_sport = sel->sport;
4488 fl.u.ip4.fl4_dport = sel->dport;
4489 break;
4490 case AF_INET6:
4491 fl.u.ip6.saddr = sel->saddr.in6;
4492 fl.u.ip6.daddr = sel->daddr.in6;
4493 if (sel->proto == IPSEC_ULPROTO_ANY)
4494 break;
4495 fl.u.flowi6_oif = sel->ifindex;
4496 fl.u.ip6.fl4_sport = sel->sport;
4497 fl.u.ip6.fl4_dport = sel->dport;
4498 break;
4499 default:
4500 return ERR_PTR(-EAFNOSUPPORT);
4501 }
4502
4503 rcu_read_lock();
4504
4505 pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
4506 if (IS_ERR_OR_NULL(pol))
4507 goto out_unlock;
4508
4509 if (!xfrm_pol_hold_rcu(pol))
4510 pol = NULL;
4511 out_unlock:
4512 rcu_read_unlock();
4513 return pol;
4514 }
4515
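/* Does template @t describe the SA that migrate entry @m wants to move?
 * Tunnel and BEET mode templates must also match on the old source and
 * destination addresses; transport mode matches on mode/proto/reqid
 * alone.
 */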
4516 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4517 {
4518 int match = 0;
4519
4520 if (t->mode == m->mode && t->id.proto == m->proto &&
4521 (m->reqid == 0 || t->reqid == m->reqid)) {
4522 switch (t->mode) {
4523 case XFRM_MODE_TUNNEL:
4524 case XFRM_MODE_BEET:
4525 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4526 m->old_family) &&
4527 xfrm_addr_equal(&t->saddr, &m->old_saddr,
4528 m->old_family)) {
4529 match = 1;
4530 }
4531 break;
4532 case XFRM_MODE_TRANSPORT:
4533 /* In transport mode the template does not store
4534 any IP addresses, so comparing mode and protocol
4535 is sufficient. */
4536 match = 1;
4537 break;
4538 default:
4539 break;
4540 }
4541 }
4542 return match;
4543 }
4544
4545 /* update endpoint address(es) of template(s) */
4546 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4547 struct xfrm_migrate *m, int num_migrate,
4548 struct netlink_ext_ack *extack)
4549 {
4550 struct xfrm_migrate *mp;
4551 int i, j, n = 0;
4552
4553 write_lock_bh(&pol->lock);
4554 if (unlikely(pol->walk.dead)) {
4555 /* target policy has been deleted */
4556 NL_SET_ERR_MSG(extack, "Target policy not found");
4557 write_unlock_bh(&pol->lock);
4558 return -ENOENT;
4559 }
4560
4561 for (i = 0; i < pol->xfrm_nr; i++) {
4562 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4563 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4564 continue;
4565 n++;
4566 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4567 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4568 continue;
4569 /* update endpoints */
4570 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4571 sizeof(pol->xfrm_vec[i].id.daddr));
4572 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4573 sizeof(pol->xfrm_vec[i].saddr));
4574 pol->xfrm_vec[i].encap_family = mp->new_family;
4575 /* flush bundles */
4576 atomic_inc(&pol->genid);
4577 }
4578 }
4579
4580 write_unlock_bh(&pol->lock);
4581
4582 if (!n)
4583 return -ENODATA;
4584
4585 return 0;
4586 }
4587
4588 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4589 struct netlink_ext_ack *extack)
4590 {
4591 int i, j;
4592
4593 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4594 NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4595 return -EINVAL;
4596 }
4597
4598 for (i = 0; i < num_migrate; i++) {
4599 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4600 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4601 NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4602 return -EINVAL;
4603 }
4604
4605 /* check if there is any duplicated entry */
4606 for (j = i + 1; j < num_migrate; j++) {
4607 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4608 sizeof(m[i].old_daddr)) &&
4609 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4610 sizeof(m[i].old_saddr)) &&
4611 m[i].proto == m[j].proto &&
4612 m[i].mode == m[j].mode &&
4613 m[i].reqid == m[j].reqid &&
4614 m[i].old_family == m[j].old_family) {
4615 NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4616 return -EINVAL;
4617 }
4618 }
4619 }
4620
4621 return 0;
4622 }
4623
4624 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4625 struct xfrm_migrate *m, int num_migrate,
4626 struct xfrm_kmaddress *k, struct net *net,
4627 struct xfrm_encap_tmpl *encap, u32 if_id,
4628 struct netlink_ext_ack *extack, struct xfrm_user_offload *xuo)
4629 {
4630 int i, err, nx_cur = 0, nx_new = 0;
4631 struct xfrm_policy *pol = NULL;
4632 struct xfrm_state *x, *xc;
4633 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4634 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4635 struct xfrm_migrate *mp;
4636
4637 /* Stage 0 - sanity checks */
4638 err = xfrm_migrate_check(m, num_migrate, extack);
4639 if (err < 0)
4640 goto out;
4641
4642 if (dir >= XFRM_POLICY_MAX) {
4643 NL_SET_ERR_MSG(extack, "Invalid policy direction");
4644 err = -EINVAL;
4645 goto out;
4646 }
4647
4648 /* Stage 1 - find policy */
4649 pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4650 if (IS_ERR_OR_NULL(pol)) {
4651 NL_SET_ERR_MSG(extack, "Target policy not found");
4652 err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
4653 goto out;
4654 }
4655
4656 /* Stage 2 - find and update state(s) */
4657 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4658 if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4659 x_cur[nx_cur] = x;
4660 nx_cur++;
4661 xc = xfrm_state_migrate(x, mp, encap, net, xuo, extack);
4662 if (xc) {
4663 x_new[nx_new] = xc;
4664 nx_new++;
4665 } else {
4666 err = -ENODATA;
4667 goto restore_state;
4668 }
4669 }
4670 }
4671
4672 /* Stage 3 - update policy */
4673 err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4674 if (err < 0)
4675 goto restore_state;
4676
4677 /* Stage 4 - delete old state(s) */
4678 if (nx_cur) {
4679 xfrm_states_put(x_cur, nx_cur);
4680 xfrm_states_delete(x_cur, nx_cur);
4681 }
4682
4683 /* Stage 5 - announce */
4684 km_migrate(sel, dir, type, m, num_migrate, k, encap);
4685
4686 xfrm_pol_put(pol);
4687
4688 return 0;
4689 out:
4690 return err;
4691
4692 restore_state:
4693 if (pol)
4694 xfrm_pol_put(pol);
4695 if (nx_cur)
4696 xfrm_states_put(x_cur, nx_cur);
4697 if (nx_new)
4698 xfrm_states_delete(x_new, nx_new);
4699
4700 return err;
4701 }
4702 EXPORT_SYMBOL(xfrm_migrate);
4703 #endif
4704