// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		offset -= VLAN_HLEN;
		goto skip;
	}

	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
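
/*
 * Illustration (assumed rule syntax, not part of the logic above): with
 * hardware-accelerated VLAN, the tag lives in skb metadata rather than in
 * the packet data.  For a stripped tag with TCI 0x0064 (VLAN 100), a raw
 * load of 4 bytes at link-layer offset 12 (nft "@ll,96,32") is answered
 * from the synthesized vlan_ethhdr above: skb->vlan_proto (e.g.
 * ETH_P_8021Q) followed by the reinserted TCI, exactly as if the tag had
 * never been offloaded.
 */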

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
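
/*
 * Example mapping (illustrative): "tcp dport 22" compiles to a meta
 * l4proto check plus a payload expression with base
 * NFT_PAYLOAD_TRANSPORT_HEADER, offset 2 (offsetof(struct tcphdr, dest))
 * and len 2, followed by a cmp against htons(22).  The store into
 * dest[priv->len / NFT_REG32_SIZE] at the top of nft_payload_eval()
 * zero-pads the final register word, so loads shorter than a multiple of
 * NFT_REG32_SIZE compare cleanly against right-sized constants.
 */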

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
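
/*
 * A summary of the offload helpers (above and below), as understood here:
 * NFT_OFFLOAD_MATCH() records which flow dissector key and field the
 * destination register corresponds to; the cmp expression that follows
 * the payload load then supplies the value to match in hardware.  Only
 * whole, exactly-sized header fields can be described this way, hence
 * the strict offset/len checks in every case.
 */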

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};
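
/*
 * nft_payload_fast_ops is deliberately identical to nft_payload_ops; the
 * distinct ops pointer is what matters.  The core interpreter in
 * nf_tables_core.c compares expr->ops against &nft_payload_fast_ops and,
 * on a match, tries the inlined nft_payload_fast_eval() first, falling
 * back to nft_payload_eval() for cases the fast path cannot handle (e.g.
 * nonlinear skbs or VLAN-tagged packets).
 */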

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
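
/*
 * The helper above is the usual incremental update in the style of
 * RFC 1624: subtract the checksum of the old bytes (fsum), add the
 * checksum of the new bytes (tsum), and refold.  A folded result of 0 is
 * written back as CSUM_MANGLED_0 (0xffff); both encode zero in ones'
 * complement arithmetic, but an on-wire 0 would mean "no checksum" to UDP.
 */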

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}
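
/*
 * Per RFC 768, an all-zero UDP checksum means the sender computed none,
 * so there is nothing to fix up after mangling; the caller treats such
 * packets as not needing layer 4 checksum recalculation.
 */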

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
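
/*
 * A note on the ip_summed handling above (summary, not new behaviour):
 * with CHECKSUM_PARTIAL the checksum field holds only the pseudo-header
 * sum that the device will complete, so it is updated without the
 * initial complement; with CHECKSUM_COMPLETE, skb->csum covers the
 * packet contents and must be adjusted in step with the mangled bytes.
 */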

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
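
/*
 * Illustration (typical userspace usage, not mandated here): a rule like
 * "ip ttl set 64" is generated with csum_type NFT_PAYLOAD_CSUM_INET and
 * csum_offset offsetof(struct iphdr, check), so the IPv4 header checksum
 * is patched incrementally as soon as the new TTL is written.
 */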

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
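
/*
 * Note the ordering in nft_payload_set_eval() above: fsum/tsum are
 * computed and the checksums adjusted *before* skb_store_bits()
 * overwrites the payload, since fsum must summarize the original bytes.
 */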

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
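
/*
 * Illustration of the fast-path test above: "ip saddr" (network header,
 * offset 12, len 4) is aligned and power-of-two sized, so it gets
 * nft_payload_fast_ops; link-layer loads always take the generic ops
 * because of the VLAN reconstruction in nft_payload_copy_vlan().
 */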

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};