/*
 * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
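
/*
 * Example usage (a sketch based on the userspace iptables cluster match
 * extension; option names may differ between iptables versions, so check
 * iptables-extensions(8)): a two-node cluster where this node keeps its
 * share of the flows and drops the rest.
 *
 *   iptables -t mangle -A PREROUTING -i eth1 -m cluster \
 *           --cluster-total-nodes 2 --cluster-local-node 1 \
 *           --cluster-hash-seed 0xdeadbeef -j MARK --set-mark 0xffff
 *   iptables -t mangle -A PREROUTING -i eth1 -m mark ! --mark 0xffff -j DROP
 */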
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/xt_cluster.h>

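/*
 * Both helpers below return the source address of the conntrack entry's
 * ORIGINAL direction tuple, so packets belonging to either direction of a
 * connection hash to the same value and end up on the same cluster node.
 */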
static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
{
        return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
}

static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
{
        return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
}

static inline u_int32_t
xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
{
        return jhash_1word(ip, info->hash_seed);
}

static inline u_int32_t
xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
{
        return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
}

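/*
 * Map a conntrack entry to a node index in [0, total_nodes). All cluster
 * nodes are configured with the same hash seed and total node count, so
 * every node computes the same index for a given connection; only the
 * node(s) whose bit is set in node_mask will match it.
 */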
static inline u_int32_t
xt_cluster_hash(const struct nf_conn *ct,
                const struct xt_cluster_match_info *info)
{
        u_int32_t hash = 0;

        switch (nf_ct_l3num(ct)) {
        case AF_INET:
                hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info);
                break;
        case AF_INET6:
                hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info);
                break;
        default:
                WARN_ON(1);
                break;
        }

        return reciprocal_scale(hash, info->total_nodes);
}

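/* IPv6 multicast addresses are in ff00::/8, i.e. the high byte is 0xff. */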
static inline bool
xt_cluster_ipv6_is_multicast(const struct in6_addr *addr)
{
        __be32 st = addr->s6_addr32[0];
        return ((st & htonl(0xFF000000)) == htonl(0xFF000000));
}

static inline bool
xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
{
        bool is_multicast = false;

        switch (family) {
        case NFPROTO_IPV4:
                is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
                break;
        case NFPROTO_IPV6:
                is_multicast =
                        xt_cluster_ipv6_is_multicast(&ipv6_hdr(skb)->daddr);
                break;
        default:
                WARN_ON(1);
                break;
        }
        return is_multicast;
}

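/*
 * Match iff the packet's connection hashes to one of this node's slots.
 * Expected (related) connections are hashed on their master conntrack, so
 * e.g. an FTP data connection is handled by the same node as its control
 * connection. The XT_CLUSTER_F_INV flag inverts the result.
 */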
static bool
xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        struct sk_buff *pskb = (struct sk_buff *)skb;
        const struct xt_cluster_match_info *info = par->matchinfo;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        unsigned long hash;

        /* This match assumes that all nodes see the same packets. This can
         * be achieved if the switch that connects the cluster nodes supports
         * some sort of 'port mirroring'. However, if your switch does not
         * support this, your cluster nodes can reply to ARP requests using a
         * multicast MAC address. The switch will then flood the same packets
         * to all cluster nodes listening on that multicast MAC address.
         * Using a multicast link-layer address violates RFC 1812 (section
         * 3.3.2), but it works fine in practice.
         *
         * Unfortunately, if you use the multicast MAC address, the link layer
         * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
         * by TCP and others for packets coming to this node. For that reason,
         * this match mangles skbuff's pkt_type if it detects a packet
         * addressed to a unicast address but using PACKET_MULTICAST. Yes, I
         * know, matches should not alter packets, but we are doing this here
         * because we would otherwise need to add a PKTTYPE target for this
         * sole purpose.
         */
        if (!xt_cluster_is_multicast_addr(skb, par->family) &&
            skb->pkt_type == PACKET_MULTICAST) {
                pskb->pkt_type = PACKET_HOST;
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return false;

        if (nf_ct_is_untracked(ct))
                return false;

        if (ct->master)
                hash = xt_cluster_hash(ct->master, info);
        else
                hash = xt_cluster_hash(ct, info);

        return !!((1 << hash) & info->node_mask) ^
               !!(info->flags & XT_CLUSTER_F_INV);
}

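/*
 * Sanity-check the userspace configuration: the node count must not exceed
 * XT_CLUSTER_NODES_MAX, and the node mask must not have bits set beyond
 * total_nodes.
 */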
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
        struct xt_cluster_match_info *info = par->matchinfo;

        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
                pr_info("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
                        info->total_nodes, XT_CLUSTER_NODES_MAX);
                return -EINVAL;
        }
        if (info->node_mask >= (1ULL << info->total_nodes)) {
                pr_info("this node mask cannot be higher than the total number of nodes\n");
                return -EDOM;
        }
        return 0;
}

static struct xt_match xt_cluster_match __read_mostly = {
        .name           = "cluster",
        .family         = NFPROTO_UNSPEC,
        .match          = xt_cluster_mt,
        .checkentry     = xt_cluster_mt_checkentry,
        .matchsize      = sizeof(struct xt_cluster_match_info),
        .me             = THIS_MODULE,
};

static int __init xt_cluster_mt_init(void)
{
        return xt_register_match(&xt_cluster_match);
}

static void __exit xt_cluster_mt_fini(void)
{
        xt_unregister_match(&xt_cluster_match);
}

MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: hash-based cluster match");
MODULE_ALIAS("ipt_cluster");
MODULE_ALIAS("ip6t_cluster");
module_init(xt_cluster_mt_init);
module_exit(xt_cluster_mt_fini);