// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>
18
19 struct nft_cmp_expr {
20 struct nft_data data;
21 enum nft_registers sreg:8;
22 u8 len;
23 enum nft_cmp_ops op:8;
24 };
25
nft_cmp_eval(const struct nft_expr * expr,struct nft_regs * regs,const struct nft_pktinfo * pkt)26 void nft_cmp_eval(const struct nft_expr *expr,
27 struct nft_regs *regs,
28 const struct nft_pktinfo *pkt)
29 {
30 const struct nft_cmp_expr *priv = nft_expr_priv(expr);
31 int d;
32
33 d = memcmp(®s->data[priv->sreg], &priv->data, priv->len);
34 switch (priv->op) {
35 case NFT_CMP_EQ:
36 if (d != 0)
37 goto mismatch;
38 break;
39 case NFT_CMP_NEQ:
40 if (d == 0)
41 goto mismatch;
42 break;
43 case NFT_CMP_LT:
44 if (d == 0)
45 goto mismatch;
46 fallthrough;
47 case NFT_CMP_LTE:
48 if (d > 0)
49 goto mismatch;
50 break;
51 case NFT_CMP_GT:
52 if (d == 0)
53 goto mismatch;
54 fallthrough;
55 case NFT_CMP_GTE:
56 if (d < 0)
57 goto mismatch;
58 break;
59 }
60 return;
61
62 mismatch:
63 regs->verdict.code = NFT_BREAK;
64 }
65
66 static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
67 [NFTA_CMP_SREG] = { .type = NLA_U32 },
68 [NFTA_CMP_OP] = { .type = NLA_U32 },
69 [NFTA_CMP_DATA] = { .type = NLA_NESTED },
70 };
71
nft_cmp_init(const struct nft_ctx * ctx,const struct nft_expr * expr,const struct nlattr * const tb[])72 static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
73 const struct nlattr * const tb[])
74 {
75 struct nft_cmp_expr *priv = nft_expr_priv(expr);
76 struct nft_data_desc desc;
77 int err;
78
79 err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
80 tb[NFTA_CMP_DATA]);
81 if (err < 0)
82 return err;
83
84 if (desc.type != NFT_DATA_VALUE) {
85 err = -EINVAL;
86 nft_data_release(&priv->data, desc.type);
87 return err;
88 }
89
90 priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
91 err = nft_validate_register_load(priv->sreg, desc.len);
92 if (err < 0)
93 return err;
94
95 priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
96 priv->len = desc.len;
97 return 0;
98 }
99
nft_cmp_dump(struct sk_buff * skb,const struct nft_expr * expr)100 static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
101 {
102 const struct nft_cmp_expr *priv = nft_expr_priv(expr);
103
104 if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
105 goto nla_put_failure;
106 if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
107 goto nla_put_failure;
108
109 if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
110 NFT_DATA_VALUE, priv->len) < 0)
111 goto nla_put_failure;
112 return 0;
113
114 nla_put_failure:
115 return -1;
116 }
117
118 union nft_cmp_offload_data {
119 u16 val16;
120 u32 val32;
121 u64 val64;
122 };
123
/**
 * nft_payload_n2h - convert a network byte order operand to host order
 * @data: destination scratch union
 * @val:  source bytes in network byte order
 * @len:  operand length in bytes; only 2, 4 and 8 are supported
 *
 * Used for offload registers flagged NFT_OFFLOAD_F_NETWORK2HOST, whose
 * flow match keys are expected in host byte order. An unsupported
 * length triggers a one-shot warning and leaves @data untouched.
 */
static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((u16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((u32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((u64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
142
__nft_cmp_offload(struct nft_offload_ctx * ctx,struct nft_flow_rule * flow,const struct nft_cmp_expr * priv)143 static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
144 struct nft_flow_rule *flow,
145 const struct nft_cmp_expr *priv)
146 {
147 struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
148 union nft_cmp_offload_data _data, _datamask;
149 u8 *mask = (u8 *)&flow->match.mask;
150 u8 *key = (u8 *)&flow->match.key;
151 u8 *data, *datamask;
152
153 if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
154 return -EOPNOTSUPP;
155
156 if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
157 nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
158 nft_payload_n2h(&_datamask, (u8 *)®->mask, reg->len);
159 data = (u8 *)&_data;
160 datamask = (u8 *)&_datamask;
161 } else {
162 data = (u8 *)&priv->data;
163 datamask = (u8 *)®->mask;
164 }
165
166 memcpy(key + reg->offset, data, reg->len);
167 memcpy(mask + reg->offset, datamask, reg->len);
168
169 flow->match.dissector.used_keys |= BIT(reg->key);
170 flow->match.dissector.offset[reg->key] = reg->base_offset;
171
172 if (reg->key == FLOW_DISSECTOR_KEY_META &&
173 reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
174 nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
175 return -EOPNOTSUPP;
176
177 nft_offload_update_dependency(ctx, &priv->data, reg->len);
178
179 return 0;
180 }
181
/* Offload callback for the generic cmp expression. */
static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}
190
191 static const struct nft_expr_ops nft_cmp_ops = {
192 .type = &nft_cmp_type,
193 .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
194 .eval = nft_cmp_eval,
195 .init = nft_cmp_init,
196 .dump = nft_cmp_dump,
197 .offload = nft_cmp_offload,
198 };
199
nft_cmp_fast_init(const struct nft_ctx * ctx,const struct nft_expr * expr,const struct nlattr * const tb[])200 static int nft_cmp_fast_init(const struct nft_ctx *ctx,
201 const struct nft_expr *expr,
202 const struct nlattr * const tb[])
203 {
204 struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
205 struct nft_data_desc desc;
206 struct nft_data data;
207 int err;
208
209 err = nft_data_init(NULL, &data, sizeof(data), &desc,
210 tb[NFTA_CMP_DATA]);
211 if (err < 0)
212 return err;
213
214 priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
215 err = nft_validate_register_load(priv->sreg, desc.len);
216 if (err < 0)
217 return err;
218
219 desc.len *= BITS_PER_BYTE;
220
221 priv->mask = nft_cmp_fast_mask(desc.len);
222 priv->data = data.data[0] & priv->mask;
223 priv->len = desc.len;
224 priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
225 return 0;
226 }
227
nft_cmp_fast_offload(struct nft_offload_ctx * ctx,struct nft_flow_rule * flow,const struct nft_expr * expr)228 static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
229 struct nft_flow_rule *flow,
230 const struct nft_expr *expr)
231 {
232 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
233 struct nft_cmp_expr cmp = {
234 .data = {
235 .data = {
236 [0] = priv->data,
237 },
238 },
239 .sreg = priv->sreg,
240 .len = priv->len / BITS_PER_BYTE,
241 .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
242 };
243
244 return __nft_cmp_offload(ctx, flow, &cmp);
245 }
246
nft_cmp_fast_dump(struct sk_buff * skb,const struct nft_expr * expr)247 static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
248 {
249 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
250 enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
251 struct nft_data data;
252
253 if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
254 goto nla_put_failure;
255 if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
256 goto nla_put_failure;
257
258 data.data[0] = priv->data;
259 if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
260 NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
261 goto nla_put_failure;
262 return 0;
263
264 nla_put_failure:
265 return -1;
266 }
267
268 const struct nft_expr_ops nft_cmp_fast_ops = {
269 .type = &nft_cmp_type,
270 .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
271 .eval = NULL, /* inlined */
272 .init = nft_cmp_fast_init,
273 .dump = nft_cmp_fast_dump,
274 .offload = nft_cmp_fast_offload,
275 };
276
277 static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx * ctx,const struct nlattr * const tb[])278 nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
279 {
280 struct nft_data_desc desc;
281 struct nft_data data;
282 enum nft_cmp_ops op;
283 int err;
284
285 if (tb[NFTA_CMP_SREG] == NULL ||
286 tb[NFTA_CMP_OP] == NULL ||
287 tb[NFTA_CMP_DATA] == NULL)
288 return ERR_PTR(-EINVAL);
289
290 op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
291 switch (op) {
292 case NFT_CMP_EQ:
293 case NFT_CMP_NEQ:
294 case NFT_CMP_LT:
295 case NFT_CMP_LTE:
296 case NFT_CMP_GT:
297 case NFT_CMP_GTE:
298 break;
299 default:
300 return ERR_PTR(-EINVAL);
301 }
302
303 err = nft_data_init(NULL, &data, sizeof(data), &desc,
304 tb[NFTA_CMP_DATA]);
305 if (err < 0)
306 return ERR_PTR(err);
307
308 if (desc.type != NFT_DATA_VALUE) {
309 err = -EINVAL;
310 goto err1;
311 }
312
313 if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ))
314 return &nft_cmp_fast_ops;
315
316 return &nft_cmp_ops;
317 err1:
318 nft_data_release(&data, desc.type);
319 return ERR_PTR(-EINVAL);
320 }
321
322 struct nft_expr_type nft_cmp_type __read_mostly = {
323 .name = "cmp",
324 .select_ops = nft_cmp_select_ops,
325 .policy = nft_cmp_policy,
326 .maxattr = NFTA_CMP_MAX,
327 .owner = THIS_MODULE,
328 };
329