// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/sctp.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/sctp/sctp.h>
#include <net/tcp.h>

struct nft_exthdr {
	u8			type;
	u8			offset;
	u8			len;
	u8			op;
	u8			dreg;
	u8			sreg;
	u8			flags;
};

static unsigned int optlen(const u8 *opt, unsigned int offset)
{
	/* Beware zero-length options: make finite progress */
	if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
		return 1;
	else
		return opt[offset + 1];
}

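/* Copy @len bytes from @offset in @skb into the destination register array,
 * zero-padding the last 32-bit register word when @len is not a multiple of
 * NFT_REG32_SIZE so no stale register data leaks into the result.
 */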
static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
{
	if (len % NFT_REG32_SIZE)
		dest[len / NFT_REG32_SIZE] = 0;

	return skb_copy_bits(skb, offset, dest, len);
}

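/* Locate the requested IPv6 extension header and either report its presence
 * (NFT_EXTHDR_F_PRESENT) or copy @priv->len bytes starting at @priv->offset
 * within it into the destination register; NFT_BREAK ends rule evaluation
 * on failure.
 */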
static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	unsigned int offset = 0;
	int err;

	if (pkt->skb->protocol != htons(ETH_P_IPV6))
		goto err;

	err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
	if (priv->flags & NFT_EXTHDR_F_PRESENT) {
		nft_reg_store8(dest, err >= 0);
		return;
	} else if (err < 0) {
		goto err;
	}
	offset += priv->offset;

	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

/* Find the offset of the specified IPv4 option.
 *
 * If the target option is found, its offset is stored in *offset and the
 * option number is returned. Otherwise, a negative error is returned.
 *
 * If the first fragment doesn't contain the End of Options it is considered
 * invalid.
 */
static int ipv4_find_option(struct net *net, struct sk_buff *skb,
			    unsigned int *offset, int target)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;
	struct iphdr *iph, _iph;
	unsigned int start;
	bool found = false;
	__be32 info;
	int optlen;

	iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
	if (!iph)
		return -EBADMSG;
	start = sizeof(struct iphdr);

	optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
	if (optlen <= 0)
		return -ENOENT;

	memset(opt, 0, sizeof(struct ip_options));
	/* Copy the options since __ip_options_compile() modifies
	 * the options.
	 */
	if (skb_copy_bits(skb, start, opt->__data, optlen))
		return -EBADMSG;
	opt->optlen = optlen;

	if (__ip_options_compile(net, opt, NULL, &info))
		return -EBADMSG;

	switch (target) {
	case IPOPT_SSRR:
	case IPOPT_LSRR:
		if (!opt->srr)
			break;
		found = target == IPOPT_SSRR ? opt->is_strictroute :
					       !opt->is_strictroute;
		if (found)
			*offset = opt->srr + start;
		break;
	case IPOPT_RR:
		if (!opt->rr)
			break;
		*offset = opt->rr + start;
		found = true;
		break;
	case IPOPT_RA:
		if (!opt->router_alert)
			break;
		*offset = opt->router_alert + start;
		found = true;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return found ? target : -ENOENT;
}

static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct sk_buff *skb = pkt->skb;
	unsigned int offset;
	int err;

	if (skb->protocol != htons(ETH_P_IP))
		goto err;

	err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
	if (priv->flags & NFT_EXTHDR_F_PRESENT) {
		nft_reg_store8(dest, err >= 0);
		return;
	} else if (err < 0) {
		goto err;
	}
	offset += priv->offset;

	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

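/* Return a pointer to a linear copy of the TCP header including its options,
 * or NULL if the packet is not TCP, is a non-first fragment, or advertises a
 * data offset shorter than the base header or larger than @len.
 */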
static void *
nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
		       unsigned int len, void *buffer, unsigned int *tcphdr_len)
{
	struct tcphdr *tcph;

	if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
		return NULL;

	tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
	if (!tcph)
		return NULL;

	*tcphdr_len = __tcp_hdrlen(tcph);
	if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
		return NULL;

	return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer);
}

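/* Walk the TCP option list looking for @priv->type. On a match, either flag
 * its presence or copy the requested bytes into the destination register;
 * otherwise clear the presence flag or break out of rule evaluation.
 */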
static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
	struct nft_exthdr *priv = nft_expr_priv(expr);
	unsigned int i, optl, tcphdr_len, offset;
	u32 *dest = &regs->data[priv->dreg];
	struct tcphdr *tcph;
	u8 *opt;

	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
	if (!tcph)
		goto err;

	opt = (u8 *)tcph;
	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
		optl = optlen(opt, i);

		if (priv->type != opt[i])
			continue;

		if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
			goto err;

		offset = i + priv->offset;
		if (priv->flags & NFT_EXTHDR_F_PRESENT) {
			nft_reg_store8(dest, 1);
		} else {
			if (priv->len % NFT_REG32_SIZE)
				dest[priv->len / NFT_REG32_SIZE] = 0;
			memcpy(dest, opt + offset, priv->len);
		}

		return;
	}

err:
	if (priv->flags & NFT_EXTHDR_F_PRESENT)
		*dest = 0;
	else
		regs->verdict.code = NFT_BREAK;
}

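/* Overwrite a 2- or 4-byte field inside an existing TCP option with the
 * value from the source register, fixing up the TCP checksum incrementally.
 * An MSS value is never increased, since that could stall the connection.
 * This backs e.g. MSS clamping rules ("tcp option maxseg size set ..." in
 * nft(8) syntax; illustrative, see the nft manual page).
 */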
static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
				    struct nft_regs *regs,
				    const struct nft_pktinfo *pkt)
{
	u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
	struct nft_exthdr *priv = nft_expr_priv(expr);
	unsigned int i, optl, tcphdr_len, offset;
	struct tcphdr *tcph;
	u8 *opt;

	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
	if (!tcph)
		goto err;

	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
		goto err;

	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
	opt = (u8 *)tcph;

	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
		union {
			__be16 v16;
			__be32 v32;
		} old, new;

		optl = optlen(opt, i);

		if (priv->type != opt[i])
			continue;

		if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
			goto err;

		offset = i + priv->offset;

		switch (priv->len) {
		case 2:
			old.v16 = get_unaligned((u16 *)(opt + offset));
			new.v16 = (__force __be16)nft_reg_load16(
				&regs->data[priv->sreg]);

			switch (priv->type) {
			case TCPOPT_MSS:
				/* increase can cause connection to stall */
				if (ntohs(old.v16) <= ntohs(new.v16))
					return;
				break;
			}

			if (old.v16 == new.v16)
				return;

			put_unaligned(new.v16, (u16 *)(opt + offset));
			inet_proto_csum_replace2(&tcph->check, pkt->skb,
						 old.v16, new.v16, false);
			break;
		case 4:
			new.v32 = regs->data[priv->sreg];
			old.v32 = get_unaligned((u32 *)(opt + offset));

			if (old.v32 == new.v32)
				return;

			put_unaligned(new.v32, (u32 *)(opt + offset));
			inet_proto_csum_replace4(&tcph->check, pkt->skb,
						 old.v32, new.v32, false);
			break;
		default:
			WARN_ON_ONCE(1);
			break;
		}

		return;
	}
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

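/* Remove a TCP option by overwriting it with TCPOPT_NOP bytes in place, so
 * the header length does not change. The checksum is patched one byte at a
 * time, shifting the byte into the high or low half of a 16-bit word
 * depending on whether it sits at an even or odd offset in the header.
 */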
static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr,
				      struct nft_regs *regs,
				      const struct nft_pktinfo *pkt)
{
	u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
	struct nft_exthdr *priv = nft_expr_priv(expr);
	unsigned int i, tcphdr_len, optl;
	struct tcphdr *tcph;
	u8 *opt;

	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
	if (!tcph)
		goto err;

	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
		goto drop;

	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
	opt = (u8 *)tcph;

	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
		unsigned int j;

		optl = optlen(opt, i);
		if (priv->type != opt[i])
			continue;

		if (i + optl > tcphdr_len)
			goto drop;

		for (j = 0; j < optl; ++j) {
			u16 n = TCPOPT_NOP;
			u16 o = opt[i + j];

			if ((i + j) % 2 == 0) {
				o <<= 8;
				n <<= 8;
			}
			inet_proto_csum_replace2(&tcph->check, pkt->skb, htons(o),
						 htons(n), false);
		}
		memset(opt + i, TCPOPT_NOP, optl);
		return;
	}

	/* option not found, continue. This allows doing multiple
	 * option removals per rule.
	 */
	return;
err:
	regs->verdict.code = NFT_BREAK;
	return;
drop:
	/* can't remove, no choice but to drop */
	regs->verdict.code = NF_DROP;
}

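/* Iterate over the SCTP chunk list (each chunk is padded to a multiple of
 * four bytes) and, when a chunk of the requested type is found, either flag
 * its presence or copy bytes from it into the destination register.
 */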
static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr);
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	const struct sctp_chunkhdr *sch;
	struct sctp_chunkhdr _sch;

	if (pkt->tprot != IPPROTO_SCTP)
		goto err;

	do {
		sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
		if (!sch || !sch->length)
			break;

		if (sch->type == priv->type) {
			if (priv->flags & NFT_EXTHDR_F_PRESENT) {
				nft_reg_store8(dest, true);
				return;
			}
			if (priv->offset + priv->len > ntohs(sch->length) ||
			    offset + ntohs(sch->length) > pkt->skb->len)
				break;

			if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
						dest, priv->len) < 0)
				break;
			return;
		}
		offset += SCTP_PAD4(ntohs(sch->length));
	} while (offset < pkt->skb->len);
err:
	if (priv->flags & NFT_EXTHDR_F_PRESENT)
		nft_reg_store8(dest, false);
	else
		regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
	[NFTA_EXTHDR_DREG]		= { .type = NLA_U32 },
	[NFTA_EXTHDR_TYPE]		= { .type = NLA_U8 },
	[NFTA_EXTHDR_OFFSET]		= { .type = NLA_U32 },
	[NFTA_EXTHDR_LEN]		= { .type = NLA_U32 },
	[NFTA_EXTHDR_FLAGS]		= { .type = NLA_U32 },
	[NFTA_EXTHDR_OP]		= { .type = NLA_U32 },
	[NFTA_EXTHDR_SREG]		= { .type = NLA_U32 },
};

static int nft_exthdr_init(const struct nft_ctx *ctx,
			   const struct nft_expr *expr,
			   const struct nlattr * const tb[])
{
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
	int err;

	if (!tb[NFTA_EXTHDR_DREG] ||
	    !tb[NFTA_EXTHDR_TYPE] ||
	    !tb[NFTA_EXTHDR_OFFSET] ||
	    !tb[NFTA_EXTHDR_LEN])
		return -EINVAL;

	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return err;

	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
	if (err < 0)
		return err;

	if (tb[NFTA_EXTHDR_FLAGS]) {
		err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
		if (err < 0)
			return err;

		if (flags & ~NFT_EXTHDR_F_PRESENT)
			return -EINVAL;
	}

	if (tb[NFTA_EXTHDR_OP]) {
		err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
		if (err < 0)
			return err;
	}

	priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
	priv->offset = offset;
	priv->len    = len;
	priv->flags  = flags;
	priv->op     = op;

	return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

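/* The TCP option set path only supports writing 2- or 4-byte fields and
 * refuses offsets below 2, so an option's kind and length bytes can never
 * be overwritten.
 */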
static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
				   const struct nft_expr *expr,
				   const struct nlattr * const tb[])
{
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
	int err;

	if (!tb[NFTA_EXTHDR_SREG] ||
	    !tb[NFTA_EXTHDR_TYPE] ||
	    !tb[NFTA_EXTHDR_OFFSET] ||
	    !tb[NFTA_EXTHDR_LEN])
		return -EINVAL;

	if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
		return -EINVAL;

	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return err;

	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
	if (err < 0)
		return err;

	if (offset < 2)
		return -EOPNOTSUPP;

	switch (len) {
	case 2: break;
	case 4: break;
	default:
		return -EOPNOTSUPP;
	}

	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
	if (err < 0)
		return err;

	priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
	priv->offset = offset;
	priv->len    = len;
	priv->flags  = flags;
	priv->op     = op;

	return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
				       priv->len);
}

static int nft_exthdr_tcp_strip_init(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     const struct nlattr * const tb[])
{
	struct nft_exthdr *priv = nft_expr_priv(expr);

	if (tb[NFTA_EXTHDR_SREG] ||
	    tb[NFTA_EXTHDR_DREG] ||
	    tb[NFTA_EXTHDR_FLAGS] ||
	    tb[NFTA_EXTHDR_OFFSET] ||
	    tb[NFTA_EXTHDR_LEN])
		return -EINVAL;

	if (!tb[NFTA_EXTHDR_TYPE])
		return -EINVAL;

	priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
	priv->op   = NFT_EXTHDR_OP_TCPOPT;

	return 0;
}

static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_exthdr *priv = nft_expr_priv(expr);
	int err = nft_exthdr_init(ctx, expr, tb);

	if (err < 0)
		return err;

	switch (priv->type) {
	case IPOPT_SSRR:
	case IPOPT_LSRR:
	case IPOPT_RR:
	case IPOPT_RA:
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
{
	if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_exthdr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
		return -1;

	return nft_exthdr_dump_common(skb, priv);
}

static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_exthdr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
		return -1;

	return nft_exthdr_dump_common(skb, priv);
}

static int nft_exthdr_dump_strip(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_exthdr *priv = nft_expr_priv(expr);

	return nft_exthdr_dump_common(skb, priv);
}

static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_ipv6_eval,
	.init		= nft_exthdr_init,
	.dump		= nft_exthdr_dump,
};

static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_ipv4_eval,
	.init		= nft_exthdr_ipv4_init,
	.dump		= nft_exthdr_dump,
};

static const struct nft_expr_ops nft_exthdr_tcp_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_tcp_eval,
	.init		= nft_exthdr_init,
	.dump		= nft_exthdr_dump,
};

static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_tcp_set_eval,
	.init		= nft_exthdr_tcp_set_init,
	.dump		= nft_exthdr_dump_set,
};

static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_tcp_strip_eval,
	.init		= nft_exthdr_tcp_strip_init,
	.dump		= nft_exthdr_dump_strip,
};

static const struct nft_expr_ops nft_exthdr_sctp_ops = {
	.type		= &nft_exthdr_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval		= nft_exthdr_sctp_eval,
	.init		= nft_exthdr_init,
	.dump		= nft_exthdr_dump,
};

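/* Pick the ops variant based on the NFTA_EXTHDR_OP attribute and on whether
 * a source or destination register was supplied; absence of the OP attribute
 * defaults to the original IPv6 extension header behaviour.
 */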
static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
		      const struct nlattr * const tb[])
{
	u32 op;

	if (!tb[NFTA_EXTHDR_OP])
		return &nft_exthdr_ipv6_ops;

	if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
		return ERR_PTR(-EOPNOTSUPP);

	op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
	switch (op) {
	case NFT_EXTHDR_OP_TCPOPT:
		if (tb[NFTA_EXTHDR_SREG])
			return &nft_exthdr_tcp_set_ops;
		if (tb[NFTA_EXTHDR_DREG])
			return &nft_exthdr_tcp_ops;
		return &nft_exthdr_tcp_strip_ops;
	case NFT_EXTHDR_OP_IPV6:
		if (tb[NFTA_EXTHDR_DREG])
			return &nft_exthdr_ipv6_ops;
		break;
	case NFT_EXTHDR_OP_IPV4:
		if (ctx->family != NFPROTO_IPV6) {
			if (tb[NFTA_EXTHDR_DREG])
				return &nft_exthdr_ipv4_ops;
		}
		break;
	case NFT_EXTHDR_OP_SCTP:
		if (tb[NFTA_EXTHDR_DREG])
			return &nft_exthdr_sctp_ops;
		break;
	}

	return ERR_PTR(-EOPNOTSUPP);
}

struct nft_expr_type nft_exthdr_type __read_mostly = {
	.name		= "exthdr",
	.select_ops	= nft_exthdr_select_ops,
	.policy		= nft_exthdr_policy,
	.maxattr	= NFTA_EXTHDR_MAX,
	.owner		= THIS_MODULE,
};