Lines Matching +full:ports +full:- +full:block +full:- +full:pack +full:- +full:mode

1 /* SPDX-License-Identifier: GPL-2.0 */
46 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
48 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
50 MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
53 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
60 ------------------------------------
63 - policy rule, struct xfrm_policy (=SPD entry)
64 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65 - instance of a transformer, struct xfrm_state (=SA)
66 - template to clone xfrm_state, struct xfrm_tmpl
75 If "action" is "block", then we prohibit the flow, otherwise:
79 to a complete xfrm_state (see below) and we pack a bundle of transformations
82 dst -. xfrm .-> xfrm_state #1
83  |---. child .-> dst -. xfrm .-> xfrm_state #2
84                   |---. child .-> dst -. xfrm .-> xfrm_state #3
85                                    |---. child .-> NULL
87 Bundles are cached in the xfrm_policy struct (field ->bundles).
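
As a rough illustration of the chain drawn above, the sketch below (kernel context assumed, not taken from this header) walks a bundle from the top-level dst_entry down through the child pointers, using the xfrm_dst_child() helper that appears further down in this file:

    #include <net/dst.h>
    #include <net/xfrm.h>

    /* Sketch only: count the transformations attached to a bundle by
     * following the child chain, exactly as in the diagram above.  Each
     * dst in the chain carries one xfrm_state in dst->xfrm; the final
     * element (the plain route) has dst->xfrm == NULL.
     */
    static int bundle_depth(struct dst_entry *dst)
    {
            int depth = 0;

            while (dst && dst->xfrm) {
                    depth++;                        /* xfrm_state #1, #2, #3, ... */
                    dst = xfrm_dst_child(dst);      /* next element of the bundle */
            }
            return depth;
    }
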
91 -----------------------
93 1. ->mode Mode: transport or tunnel
94 2. ->id.proto Protocol: AH/ESP/IPCOMP
95 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
97 4. ->id.spi If not zero, static SPI.
98 5. ->saddr Local tunnel endpoint, ignored for transport mode.
99 6. ->algos List of allowed algos. Plain bitmask now.
101 7. ->share Sharing mode.
102 Q: how to implement private sharing mode? To add struct sock* to
106 with appropriate mode/proto/algo, permitted by selector.
117 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
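
A minimal sketch of how the numbered fields above could be filled in on a struct xfrm_tmpl (kernel context assumed; the tunnel/ESP choice and the 192.0.2.x addresses are illustrative values, not taken from this header):

    #include <linux/in.h>
    #include <linux/string.h>
    #include <net/xfrm.h>

    /* Illustrative only: a tunnel-mode ESP template with a fixed remote
     * endpoint and SPI 0 (i.e. acquire an SPI), mirroring the field list
     * above.  192.0.2.x are documentation addresses, not real config.
     */
    static void example_fill_tmpl(struct xfrm_tmpl *t)
    {
            memset(t, 0, sizeof(*t));
            t->mode         = XFRM_MODE_TUNNEL;       /* 1. ->mode                  */
            t->id.proto     = IPPROTO_ESP;            /* 2. ->id.proto              */
            t->id.daddr.a4  = htonl(0xc0000201);      /* 3. ->id.daddr = 192.0.2.1  */
            t->id.spi       = 0;                      /* 4. ->id.spi, 0 => acquire  */
            t->saddr.a4     = htonl(0xc0000202);      /* 5. ->saddr   = 192.0.2.2   */
            t->aalgos = t->ealgos = t->calgos = ~0;   /* 6. ->algos: allow all      */
            t->share        = XFRM_SHARE_ANY;         /* 7. ->share                 */
            t->encap_family = AF_INET;
    }
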
175 u8 mode; member
205 /* Data for care-of address */
245 /* used to fix curlft->add_time when changing date */
272 return read_pnet(&x->xs_net); in xs_net()
275 /* xflags - make enum if more show up */
442 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) || in xfrm_ip2inner_mode()
443 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6)) in xfrm_ip2inner_mode()
444 return &x->inner_mode; in xfrm_ip2inner_mode()
446 return &x->inner_mode_iaf; in xfrm_ip2inner_mode()
451 * daddr - destination of tunnel, may be zero for transport mode.
452 * spi - zero to acquire spi. Not zero if spi is static, then
454 * proto - AH/ESP/IPCOMP
465 /* Mode: transport, tunnel etc. */
466 u8 mode; member
468 /* Sharing mode: unique, this session only, this user only etc. */
538 return read_pnet(&xp->xp_net); in xp_net()
554 u8 mode; member
605 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
628 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
632 * to transmit header information to the mode input/output functions.
660 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
674 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
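
These *_SKB_CB() macros all overlay a private structure on the skb->cb[] scratch area so that per-packet state can be handed to the mode input/output functions. A generic, hypothetical illustration of the same cast pattern (my_cb / MY_SKB_CB are invented names, not part of this header):

    #include <linux/build_bug.h>
    #include <linux/skbuff.h>

    /* Hypothetical control block, same pattern as XFRM_SKB_CB() above:
     * overlay a small private struct on the skb->cb[] scratch bytes.
     */
    struct my_cb {
            u32 seq;
            u8  flags;
    };
    #define MY_SKB_CB(__skb) ((struct my_cb *)&((__skb)->cb[0]))

    static void example_stash(struct sk_buff *skb)
    {
            /* cb[] is small and shared between layers; never overflow it */
            BUILD_BUG_ON(sizeof(struct my_cb) > sizeof(skb->cb));
            MY_SKB_CB(skb)->seq   = 1;
            MY_SKB_CB(skb)->flags = 0;
    }
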
769 refcount_inc(&policy->refcnt); in xfrm_pol_hold()
776 if (refcount_dec_and_test(&policy->refcnt)) in xfrm_pol_put()
783 for (i = npols - 1; i >= 0; --i) in xfrm_pols_put()
791 refcount_dec(&x->refcnt); in __xfrm_state_put()
796 if (refcount_dec_and_test(&x->refcnt)) in xfrm_state_put()
802 if (refcount_dec_and_test(&x->refcnt)) in xfrm_state_put_sync()
808 refcount_inc(&x->refcnt); in xfrm_state_hold()
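
The hold/put helpers above follow the usual refcounting pattern; a hedged sketch of a caller (kernel context assumed, not taken from this header):

    #include <net/xfrm.h>

    /* Sketch: pin an xfrm_state while using it outside the protection of
     * the lookup that returned it, then drop the reference.
     */
    static void example_use_state(struct xfrm_state *x)
    {
            xfrm_state_hold(x);             /* refcount_inc(&x->refcnt)        */

            /* ... read x->id, x->props, etc. ... */

            xfrm_state_put(x);              /* frees the state on the last put */
    }
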
829 mask = htonl((0xffffffff) << (32 - pbi)); in addr_match()
843 return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen))); in addr4_match()
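
addr_match() and addr4_match() build a network-byte-order mask from the prefix length; below is a small standalone demonstration of the same arithmetic (plain userspace C, not part of the header). The prefixlen == 0 guard matters because shifting a 32-bit value by 32 is undefined:

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    /* Same idea as addr4_match(): two IPv4 addresses (network byte order)
     * match when they do not differ within the top `prefixlen` bits. */
    static int prefix4_match(uint32_t a1, uint32_t a2, unsigned int prefixlen)
    {
            if (prefixlen == 0)
                    return 1;               /* a /0 prefix matches everything */
            return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
    }

    int main(void)
    {
            uint32_t a = inet_addr("192.0.2.17");
            uint32_t b = inet_addr("192.0.2.200");

            assert(prefix4_match(a, b, 24));        /* same /24              */
            assert(!prefix4_match(a, b, 28));       /* differ inside the /28 */
            return 0;
    }
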
850 switch(fl->flowi_proto) { in xfrm_flowi_sport()
855 port = uli->ports.sport; in xfrm_flowi_sport()
859 port = htons(uli->icmpt.type); in xfrm_flowi_sport()
862 port = htons(uli->mht.type); in xfrm_flowi_sport()
865 port = htons(ntohl(uli->gre_key) >> 16); in xfrm_flowi_sport()
877 switch(fl->flowi_proto) { in xfrm_flowi_dport()
882 port = uli->ports.dport; in xfrm_flowi_dport()
886 port = htons(uli->icmpt.code); in xfrm_flowi_dport()
889 port = htons(ntohl(uli->gre_key) & 0xffff); in xfrm_flowi_dport()
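
xfrm_flowi_sport()/xfrm_flowi_dport() fold protocol-specific selectors into the two 16-bit "port" slots: TCP/UDP use real ports, ICMP contributes type and code, and a 32-bit GRE key is split across both halves. A standalone demonstration of the GRE split (plain userspace C, not part of the header):

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    /* Same convention as above for GRE: expose the 32-bit key as two
     * 16-bit "ports" so the generic selector code can match on it. */
    static void gre_key_to_ports(uint32_t gre_key_be,
                                 uint16_t *sport_be, uint16_t *dport_be)
    {
            uint32_t key = ntohl(gre_key_be);

            *sport_be = htons(key >> 16);           /* upper 16 bits */
            *dport_be = htons(key & 0xffff);        /* lower 16 bits */
    }

    int main(void)
    {
            uint16_t s, d;

            gre_key_to_ports(htonl(0x12345678), &s, &d);
            assert(ntohs(s) == 0x1234 && ntohs(d) == 0x5678);
            return 0;
    }
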
901 /* If neither has a context --> match
908 (s1->ctx_sid == s2->ctx_sid) && in xfrm_sec_ctx_match()
909 (s1->ctx_doi == s2->ctx_doi) && in xfrm_sec_ctx_match()
910 (s1->ctx_alg == s2->ctx_alg))); in xfrm_sec_ctx_match()
921 * xdst->child points to the next element of bundle.
922 dst->xfrm points to an instance of a transformer.
952 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { in xfrm_dst_path()
955 return xdst->path; in xfrm_dst_path()
964 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { in xfrm_dst_child()
966 return xdst->child; in xfrm_dst_child()
975 xdst->child = child; in xfrm_dst_set_child()
980 xfrm_pols_put(xdst->pols, xdst->num_pols); in xfrm_dst_destroy()
981 dst_release(xdst->route); in xfrm_dst_destroy()
982 if (likely(xdst->u.dst.xfrm)) in xfrm_dst_destroy()
983 xfrm_state_put(xdst->u.dst.xfrm); in xfrm_dst_destroy()
1058 return addr->a4 == 0; in xfrm_addr_any()
1060 return ipv6_addr_any(&addr->in6); in xfrm_addr_any()
1068 return (tmpl->saddr.a4 && in __xfrm4_state_addr_cmp()
1069 tmpl->saddr.a4 != x->props.saddr.a4); in __xfrm4_state_addr_cmp()
1075 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) && in __xfrm6_state_addr_cmp()
1076 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr)); in __xfrm6_state_addr_cmp()
1098 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) in __xfrm_check_nopolicy()
1099 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT; in __xfrm_check_nopolicy()
1111 return IPCB(skb)->flags & IPSKB_NOPOLICY; in __xfrm_check_dev_nopolicy()
1113 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY); in __xfrm_check_dev_nopolicy()
1120 struct net *net = dev_net(skb->dev); in __xfrm_policy_check2()
1123 if (sk && sk->sk_policy[XFRM_POLICY_IN]) in __xfrm_policy_check2()
1178 struct net *net = dev_net(skb->dev); in xfrm_route_forward()
1180 if (!net->xfrm.policy_count[XFRM_POLICY_OUT] && in xfrm_route_forward()
1181 net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT) in xfrm_route_forward()
1184 return (skb_dst(skb)->flags & DST_NOXFRM) || in xfrm_route_forward()
1204 sk->sk_policy[0] = NULL; in xfrm_sk_clone_policy()
1205 sk->sk_policy[1] = NULL; in xfrm_sk_clone_policy()
1206 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) in xfrm_sk_clone_policy()
1217 pol = rcu_dereference_protected(sk->sk_policy[0], 1); in xfrm_sk_free_policy()
1220 sk->sk_policy[0] = NULL; in xfrm_sk_free_policy()
1222 pol = rcu_dereference_protected(sk->sk_policy[1], 1); in xfrm_sk_free_policy()
1225 sk->sk_policy[1] = NULL; in xfrm_sk_free_policy()
1251 return -ENOSYS; in xfrm_decode_session_reverse()
1270 return (xfrm_address_t *)&fl->u.ip4.daddr; in xfrm_flowi_daddr()
1272 return (xfrm_address_t *)&fl->u.ip6.daddr; in xfrm_flowi_daddr()
1282 return (xfrm_address_t *)&fl->u.ip4.saddr; in xfrm_flowi_saddr()
1284 return (xfrm_address_t *)&fl->u.ip6.saddr; in xfrm_flowi_saddr()
1296 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4)); in xfrm_flowi_addr_get()
1297 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); in xfrm_flowi_addr_get()
1300 saddr->in6 = fl->u.ip6.saddr; in xfrm_flowi_addr_get()
1301 daddr->in6 = fl->u.ip6.daddr; in xfrm_flowi_addr_get()
1310 if (daddr->a4 == x->id.daddr.a4 && in __xfrm4_state_addr_check()
1311 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4)) in __xfrm4_state_addr_check()
1320 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) && in __xfrm6_state_addr_check()
1321 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) || in __xfrm6_state_addr_check()
1323 ipv6_addr_any((struct in6_addr *)&x->props.saddr))) in __xfrm6_state_addr_check()
1349 (const xfrm_address_t *)&fl->u.ip4.daddr, in xfrm_state_addr_flow_check()
1350 (const xfrm_address_t *)&fl->u.ip4.saddr); in xfrm_state_addr_flow_check()
1353 (const xfrm_address_t *)&fl->u.ip6.daddr, in xfrm_state_addr_flow_check()
1354 (const xfrm_address_t *)&fl->u.ip6.saddr); in xfrm_state_addr_flow_check()
1361 return atomic_read(&x->tunnel_users); in xfrm_state_kern()
1524 u8 mode, u8 proto, u32 reqid);
1609 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; in xfrm4_rcv_spi()
1610 XFRM_SPI_SKB_CB(skb)->family = AF_INET; in xfrm4_rcv_spi()
1611 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); in xfrm4_rcv_spi()
1654 return -ENOPROTOOPT; in xfrm_user_policy()
1687 u8 mode, u32 reqid, u32 if_id, u8 proto,
1745 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0; in xfrm_addr_equal()
1763 nlsk = rcu_dereference(net->xfrm.nlsk); in xfrm_aevent_is_on()
1776 nlsk = rcu_dereference(net->xfrm.nlsk); in xfrm_acquire_is_on()
1787 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in aead_len()
1792 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in xfrm_alg_len()
1797 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); in xfrm_alg_auth_len()
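
aead_len(), xfrm_alg_len() and xfrm_alg_auth_len() all size an algorithm blob as the struct header plus the key length rounded up from bits to whole bytes; a standalone check of that rounding (plain userspace C):

    #include <assert.h>
    #include <stddef.h>

    /* Bits -> bytes, rounded up, as in the three helpers above. */
    static size_t key_bytes(unsigned int alg_key_len_bits)
    {
            return (alg_key_len_bits + 7) / 8;
    }

    int main(void)
    {
            assert(key_bytes(128) == 16);   /* e.g. an AES-128 key      */
            assert(key_bytes(160) == 20);   /* e.g. an HMAC-SHA1 key    */
            assert(key_bytes(1)   == 1);    /* a partial byte rounds up */
            return 0;
    }
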
1802 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32); in xfrm_replay_state_esn_len()
1810 x->replay_esn = kmemdup(orig->replay_esn, in xfrm_replay_clone()
1811 xfrm_replay_state_esn_len(orig->replay_esn), in xfrm_replay_clone()
1813 if (!x->replay_esn) in xfrm_replay_clone()
1814 return -ENOMEM; in xfrm_replay_clone()
1815 x->preplay_esn = kmemdup(orig->preplay_esn, in xfrm_replay_clone()
1816 xfrm_replay_state_esn_len(orig->preplay_esn), in xfrm_replay_clone()
1818 if (!x->preplay_esn) in xfrm_replay_clone()
1819 return -ENOMEM; in xfrm_replay_clone()
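
xfrm_replay_state_esn_len() sizes the ESN replay state as the fixed header plus one 32-bit word per bitmap slot, and xfrm_replay_clone() duplicates both copies with kmemdup(). A standalone sketch of the size calculation, using a local stand-in struct (field layout assumed from the xfrm UAPI header):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for struct xfrm_replay_state_esn: fixed fields followed by
     * a variable-length bitmap of 32-bit words. */
    struct replay_esn_like {
            uint32_t bmp_len, oseq, seq, oseq_hi, seq_hi, replay_window;
            uint32_t bmp[];
    };

    /* Same arithmetic as xfrm_replay_state_esn_len() above. */
    static size_t replay_esn_len(const struct replay_esn_like *r)
    {
            return sizeof(*r) + r->bmp_len * sizeof(uint32_t);
    }

    int main(void)
    {
            /* A 256-bit anti-replay window needs 256 / 32 = 8 bitmap words. */
            struct replay_esn_like r = { .bmp_len = 8, .replay_window = 256 };

            /* 6 fixed 32-bit fields plus 8 bitmap words. */
            assert(replay_esn_len(&r) == (6 + 8) * sizeof(uint32_t));
            return 0;
    }
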
1860 return sp->xvec[sp->len - 1]; in xfrm_input_state()
1869 if (!sp || !sp->olen || sp->len != sp->olen) in xfrm_offload()
1872 return &sp->ovec[sp->olen - 1]; in xfrm_offload()
1890 struct xfrm_state_offload *xso = &x->xso; in xfrm_dev_state_advance_esn()
1892 if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) in xfrm_dev_state_advance_esn()
1893 xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); in xfrm_dev_state_advance_esn()
1898 struct xfrm_state *x = dst->xfrm; in xfrm_dst_offload_ok()
1901 if (!x || !x->type_offload) in xfrm_dst_offload_ok()
1905 if (!x->xso.offload_handle && !xdst->child->xfrm) in xfrm_dst_offload_ok()
1907 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) && in xfrm_dst_offload_ok()
1908 !xdst->child->xfrm) in xfrm_dst_offload_ok()
1916 struct xfrm_state_offload *xso = &x->xso; in xfrm_dev_state_delete()
1918 if (xso->dev) in xfrm_dev_state_delete()
1919 xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); in xfrm_dev_state_delete()
1924 struct xfrm_state_offload *xso = &x->xso; in xfrm_dev_state_free()
1925 struct net_device *dev = xso->dev; in xfrm_dev_state_free()
1927 if (dev && dev->xfrmdev_ops) { in xfrm_dev_state_free()
1928 if (dev->xfrmdev_ops->xdo_dev_state_free) in xfrm_dev_state_free()
1929 dev->xfrmdev_ops->xdo_dev_state_free(x); in xfrm_dev_state_free()
1930 xso->dev = NULL; in xfrm_dev_state_free()
1981 m->v = m->m = 0; in xfrm_mark_get()
1983 return m->v & m->m; in xfrm_mark_get()
1990 if (m->m | m->v) in xfrm_mark_put()
1997 struct xfrm_mark *m = &x->props.smark; in xfrm_smark_get()
1999 return (m->v & m->m) | (mark & ~m->m); in xfrm_smark_get()
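
xfrm_smark_get() overlays the bits claimed by the configured mask with the configured value and keeps the remaining bits of the mark that was passed in; a standalone demonstration (plain userspace C):

    #include <assert.h>
    #include <stdint.h>

    /* Same combine rule as xfrm_smark_get() above: masked bits come from
     * the configured value, all other bits come from the incoming mark. */
    static uint32_t smark_combine(uint32_t mark, uint32_t v, uint32_t m)
    {
            return (v & m) | (mark & ~m);
    }

    int main(void)
    {
            /* Configured smark: value 0x0000beef under mask 0x0000ffff. */
            uint32_t out = smark_combine(0x12340000, 0x0000beef, 0x0000ffff);

            assert(out == 0x1234beef);      /* low 16 bits replaced, high 16 kept */
            return 0;
    }
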
2018 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) in xfrm_tunnel_check()
2022 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6) in xfrm_tunnel_check()
2026 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL)) in xfrm_tunnel_check()
2027 return -EINVAL; in xfrm_tunnel_check()
2039 /* Allocate nlmsg with 64-bit translation of received 32-bit message */
2044 /* Translate 32-bit user_policy from sockptr */
2070 if (!sk || sk->sk_family != AF_INET6) in xfrm6_local_dontfrag()
2073 proto = sk->sk_protocol; in xfrm6_local_dontfrag()
2075 return inet6_sk(sk)->dontfrag; in xfrm6_local_dontfrag()