// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

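/*
 * Private data of the generic cmp expression: the constant operand to
 * compare against, the source register holding the packet data, the
 * operand length in bytes and the comparison operator.
 */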
struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

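/*
 * Compare len bytes from the source register against the constant with
 * memcmp(); LT and GT reuse the LTE/GTE checks by falling through once
 * exact equality has been ruled out.  On mismatch, rule evaluation stops
 * with an NFT_BREAK verdict.
 */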
void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		/* fall through */
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		/* fall through */
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};

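/*
 * Parse the NFTA_CMP_* attributes: the constant operand must be a plain
 * value (not a verdict), and the source register must be able to hold
 * desc.len bytes.
 */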
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	int err;

	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	if (desc.type != NFT_DATA_VALUE) {
		err = -EINVAL;
		nft_data_release(&priv->data, desc.type);
		return err;
	}

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;
	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

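/*
 * Hardware offload: only equality comparisons whose length matches the
 * tracked offload register can be translated.  The constant becomes the
 * flow dissector key and the register's mask becomes the match mask.
 */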
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;

	if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
		return -EOPNOTSUPP;

	memcpy(key + reg->offset, &priv->data, priv->len);
	memcpy(mask + reg->offset, &reg->mask, priv->len);

	flow->match.dissector.used_keys |= BIT(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	nft_offload_update_dependency(ctx, &priv->data, priv->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.offload	= nft_cmp_offload,
};

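/*
 * Fast path: equality comparisons on operands of up to 32 bits are handled
 * by nft_cmp_fast_expr.  Its eval callback stays NULL because the core
 * evaluation path performs this comparison inline; the length is stored in
 * bits and the constant is masked down to the significant bits at init time.
 */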
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	struct nft_data data;
	u32 mask;
	int err;

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;
	mask = nft_cmp_fast_mask(desc.len);

	priv->data = data.data[0] & mask;
	priv->len  = desc.len;
	return 0;
}

static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.offload	= nft_cmp_fast_offload,
};

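/*
 * Pick the expression ops: equality comparisons on values of at most
 * sizeof(u32) bytes use the fast ops, everything else falls back to the
 * generic memcmp()-based implementation.
 */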
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data_desc desc;
	struct nft_data data;
	enum nft_cmp_ops op;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	if (desc.type != NFT_DATA_VALUE) {
		err = -EINVAL;
		goto err1;
	}

	if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
		return &nft_cmp_fast_ops;

	return &nft_cmp_ops;
err1:
	nft_data_release(&data, desc.type);
	return ERR_PTR(-EINVAL);
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};