• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/kernel.h>
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/seqlock.h>
6 #include <linux/netlink.h>
7 #include <linux/netfilter.h>
8 #include <linux/netfilter/nf_tables.h>
9 #include <net/netfilter/nf_tables.h>
10 #include <net/dst_metadata.h>
11 #include <net/ip_tunnels.h>
12 #include <net/vxlan.h>
13 #include <net/erspan.h>
14 #include <net/geneve.h>
15 
/* Per-expression private data for the "tunnel" expression. */
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;	/* which tunnel attribute to fetch */
	u8			dreg;	/* destination register index */
	enum nft_tunnel_mode	mode:8;	/* direction filter: none/rx/tx */
};
21 
nft_tunnel_get_eval(const struct nft_expr * expr,struct nft_regs * regs,const struct nft_pktinfo * pkt)22 static void nft_tunnel_get_eval(const struct nft_expr *expr,
23 				struct nft_regs *regs,
24 				const struct nft_pktinfo *pkt)
25 {
26 	const struct nft_tunnel *priv = nft_expr_priv(expr);
27 	u32 *dest = &regs->data[priv->dreg];
28 	struct ip_tunnel_info *tun_info;
29 
30 	tun_info = skb_tunnel_info(pkt->skb);
31 
32 	switch (priv->key) {
33 	case NFT_TUNNEL_PATH:
34 		if (!tun_info) {
35 			nft_reg_store8(dest, false);
36 			return;
37 		}
38 		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
39 		    (priv->mode == NFT_TUNNEL_MODE_RX &&
40 		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
41 		    (priv->mode == NFT_TUNNEL_MODE_TX &&
42 		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
43 			nft_reg_store8(dest, true);
44 		else
45 			nft_reg_store8(dest, false);
46 		break;
47 	case NFT_TUNNEL_ID:
48 		if (!tun_info) {
49 			regs->verdict.code = NFT_BREAK;
50 			return;
51 		}
52 		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
53 		    (priv->mode == NFT_TUNNEL_MODE_RX &&
54 		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
55 		    (priv->mode == NFT_TUNNEL_MODE_TX &&
56 		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
57 			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
58 		else
59 			regs->verdict.code = NFT_BREAK;
60 		break;
61 	default:
62 		WARN_ON(1);
63 		regs->verdict.code = NFT_BREAK;
64 	}
65 }
66 
/* Netlink attribute policy for the tunnel expression. */
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= { .type = NLA_U32 },
};
72 
nft_tunnel_get_init(const struct nft_ctx * ctx,const struct nft_expr * expr,const struct nlattr * const tb[])73 static int nft_tunnel_get_init(const struct nft_ctx *ctx,
74 			       const struct nft_expr *expr,
75 			       const struct nlattr * const tb[])
76 {
77 	struct nft_tunnel *priv = nft_expr_priv(expr);
78 	u32 len;
79 
80 	if (!tb[NFTA_TUNNEL_KEY] ||
81 	    !tb[NFTA_TUNNEL_DREG])
82 		return -EINVAL;
83 
84 	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
85 	switch (priv->key) {
86 	case NFT_TUNNEL_PATH:
87 		len = sizeof(u8);
88 		break;
89 	case NFT_TUNNEL_ID:
90 		len = sizeof(u32);
91 		break;
92 	default:
93 		return -EOPNOTSUPP;
94 	}
95 
96 	if (tb[NFTA_TUNNEL_MODE]) {
97 		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
98 		if (priv->mode > NFT_TUNNEL_MODE_MAX)
99 			return -EOPNOTSUPP;
100 	} else {
101 		priv->mode = NFT_TUNNEL_MODE_NONE;
102 	}
103 
104 	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
105 					NULL, NFT_DATA_VALUE, len);
106 }
107 
nft_tunnel_get_dump(struct sk_buff * skb,const struct nft_expr * expr)108 static int nft_tunnel_get_dump(struct sk_buff *skb,
109 			       const struct nft_expr *expr)
110 {
111 	const struct nft_tunnel *priv = nft_expr_priv(expr);
112 
113 	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
114 		goto nla_put_failure;
115 	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
116 		goto nla_put_failure;
117 	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
118 		goto nla_put_failure;
119 	return 0;
120 
121 nla_put_failure:
122 	return -1;
123 }
124 
/* Forward declaration: the ops table below points back at the type. */
static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
};
133 
/* The "tunnel" expression is only available in the netdev family. */
static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};
142 
/* Tunnel-type specific option payload carried by a tunnel object. */
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];	/* raw geneve TLVs */
	} u;
	u32	len;		/* bytes used in u */
	__be16	flags;		/* TUNNEL_*_OPT discriminator */
};

/* Object private data: precomputed metadata dst plus its option copy. */
struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};
157 
/* IPv4 endpoint attributes; addresses travel as 32-bit big endian. */
static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};
162 
nft_tunnel_obj_ip_init(const struct nft_ctx * ctx,const struct nlattr * attr,struct ip_tunnel_info * info)163 static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
164 				  const struct nlattr *attr,
165 				  struct ip_tunnel_info *info)
166 {
167 	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
168 	int err;
169 
170 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
171 					  nft_tunnel_ip_policy, NULL);
172 	if (err < 0)
173 		return err;
174 
175 	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
176 		return -EINVAL;
177 
178 	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
179 		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
180 	if (tb[NFTA_TUNNEL_KEY_IP_DST])
181 		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
182 
183 	return 0;
184 }
185 
/* IPv6 endpoint attributes: fixed-size binary addresses plus an
 * optional 32-bit flow label.
 */
static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};
191 
nft_tunnel_obj_ip6_init(const struct nft_ctx * ctx,const struct nlattr * attr,struct ip_tunnel_info * info)192 static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
193 				   const struct nlattr *attr,
194 				   struct ip_tunnel_info *info)
195 {
196 	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
197 	int err;
198 
199 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
200 					  nft_tunnel_ip6_policy, NULL);
201 	if (err < 0)
202 		return err;
203 
204 	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
205 		return -EINVAL;
206 
207 	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
208 		memcpy(&info->key.u.ipv6.src,
209 		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
210 		       sizeof(struct in6_addr));
211 	}
212 	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
213 		memcpy(&info->key.u.ipv6.dst,
214 		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
215 		       sizeof(struct in6_addr));
216 	}
217 	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
218 		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
219 
220 	info->mode |= IP_TUNNEL_INFO_IPV6;
221 
222 	return 0;
223 }
224 
/* VXLAN option: only the group based policy (GBP) mark is supported. */
static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};
228 
nft_tunnel_obj_vxlan_init(const struct nlattr * attr,struct nft_tunnel_opts * opts)229 static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
230 				     struct nft_tunnel_opts *opts)
231 {
232 	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
233 	int err;
234 
235 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
236 					  nft_tunnel_opts_vxlan_policy, NULL);
237 	if (err < 0)
238 		return err;
239 
240 	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
241 		return -EINVAL;
242 
243 	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
244 
245 	opts->len	= sizeof(struct vxlan_metadata);
246 	opts->flags	= TUNNEL_VXLAN_OPT;
247 
248 	return 0;
249 }
250 
/* ERSPAN options: version selects v1 (index) or v2 (dir + hwid). */
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};
257 
nft_tunnel_obj_erspan_init(const struct nlattr * attr,struct nft_tunnel_opts * opts)258 static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
259 				      struct nft_tunnel_opts *opts)
260 {
261 	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
262 	uint8_t hwid, dir;
263 	int err, version;
264 
265 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
266 					  attr, nft_tunnel_opts_erspan_policy,
267 					  NULL);
268 	if (err < 0)
269 		return err;
270 
271 	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
272 		 return -EINVAL;
273 
274 	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
275 	switch (version) {
276 	case ERSPAN_VERSION:
277 		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
278 			return -EINVAL;
279 
280 		opts->u.erspan.u.index =
281 			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
282 		break;
283 	case ERSPAN_VERSION2:
284 		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
285 		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
286 			return -EINVAL;
287 
288 		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
289 		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
290 
291 		set_hwid(&opts->u.erspan.u.md2, hwid);
292 		opts->u.erspan.u.md2.dir = dir;
293 		break;
294 	default:
295 		return -EOPNOTSUPP;
296 	}
297 	opts->u.erspan.version = version;
298 
299 	opts->len	= sizeof(struct erspan_metadata);
300 	opts->flags	= TUNNEL_ERSPAN_OPT;
301 
302 	return 0;
303 }
304 
/* Geneve option TLV: class, type, and up to 128 bytes of option data
 * (must be a multiple of 4, enforced in nft_tunnel_obj_geneve_init).
 */
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};
310 
nft_tunnel_obj_geneve_init(const struct nlattr * attr,struct nft_tunnel_opts * opts)311 static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
312 				      struct nft_tunnel_opts *opts)
313 {
314 	struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
315 	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
316 	int err, data_len;
317 
318 	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
319 			       nft_tunnel_opts_geneve_policy, NULL);
320 	if (err < 0)
321 		return err;
322 
323 	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
324 	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
325 	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
326 		return -EINVAL;
327 
328 	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
329 	data_len = nla_len(attr);
330 	if (data_len % 4)
331 		return -EINVAL;
332 
333 	opts->len += sizeof(*opt) + data_len;
334 	if (opts->len > IP_TUNNEL_OPTS_MAX)
335 		return -EINVAL;
336 
337 	memcpy(opt->opt_data, nla_data(attr), data_len);
338 	opt->length = data_len / 4;
339 	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
340 	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
341 	opts->flags = TUNNEL_GENEVE_OPT;
342 
343 	return 0;
344 }
345 
/* Container policy for the per-type option nests; strict validation
 * starts at the geneve attribute (later UAPI addition).
 */
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};
353 
nft_tunnel_obj_opts_init(const struct nft_ctx * ctx,const struct nlattr * attr,struct ip_tunnel_info * info,struct nft_tunnel_opts * opts)354 static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
355 				    const struct nlattr *attr,
356 				    struct ip_tunnel_info *info,
357 				    struct nft_tunnel_opts *opts)
358 {
359 	int err, rem, type = 0;
360 	struct nlattr *nla;
361 
362 	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
363 					     nft_tunnel_opts_policy, NULL);
364 	if (err < 0)
365 		return err;
366 
367 	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
368 		switch (nla_type(nla)) {
369 		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
370 			if (type)
371 				return -EINVAL;
372 			err = nft_tunnel_obj_vxlan_init(nla, opts);
373 			if (err)
374 				return err;
375 			type = TUNNEL_VXLAN_OPT;
376 			break;
377 		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
378 			if (type)
379 				return -EINVAL;
380 			err = nft_tunnel_obj_erspan_init(nla, opts);
381 			if (err)
382 				return err;
383 			type = TUNNEL_ERSPAN_OPT;
384 			break;
385 		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
386 			if (type && type != TUNNEL_GENEVE_OPT)
387 				return -EINVAL;
388 			err = nft_tunnel_obj_geneve_init(nla, opts);
389 			if (err)
390 				return err;
391 			type = TUNNEL_GENEVE_OPT;
392 			break;
393 		default:
394 			return -EOPNOTSUPP;
395 		}
396 	}
397 
398 	return err;
399 }
400 
/* Top-level policy for tunnel objects (NFT_OBJECT_TUNNEL). */
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};
412 
/* Build a tunnel object: parse netlink attributes into an
 * ip_tunnel_info template, then allocate the metadata dst that
 * nft_tunnel_obj_eval() attaches to matching packets.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	/* The tunnel id is the only strictly mandatory attribute. */
	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	/* default flags; CSUM may be cleared below via ZERO_CSUM_TX */
	info.key.tun_flags	= TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	/* Exactly one of the IPv4/IPv6 endpoint nests must be present. */
	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		/* translate nft UAPI flag bits to ip_tunnel flag bits */
		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;	/* default: maximum hop count */

	/* Must run before metadata_dst_alloc(): sets priv->opts.len. */
	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}
495 
nft_tunnel_obj_eval(struct nft_object * obj,struct nft_regs * regs,const struct nft_pktinfo * pkt)496 static inline void nft_tunnel_obj_eval(struct nft_object *obj,
497 				       struct nft_regs *regs,
498 				       const struct nft_pktinfo *pkt)
499 {
500 	struct nft_tunnel_obj *priv = nft_obj_data(obj);
501 	struct sk_buff *skb = pkt->skb;
502 
503 	skb_dst_drop(skb);
504 	dst_hold((struct dst_entry *) priv->md);
505 	skb_dst_set(skb, (struct dst_entry *) priv->md);
506 }
507 
nft_tunnel_ip_dump(struct sk_buff * skb,struct ip_tunnel_info * info)508 static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
509 {
510 	struct nlattr *nest;
511 
512 	if (info->mode & IP_TUNNEL_INFO_IPV6) {
513 		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
514 		if (!nest)
515 			return -1;
516 
517 		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
518 				     &info->key.u.ipv6.src) < 0 ||
519 		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
520 				     &info->key.u.ipv6.dst) < 0 ||
521 		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
522 				 info->key.label)) {
523 			nla_nest_cancel(skb, nest);
524 			return -1;
525 		}
526 
527 		nla_nest_end(skb, nest);
528 	} else {
529 		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
530 		if (!nest)
531 			return -1;
532 
533 		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
534 				    info->key.u.ipv4.src) < 0 ||
535 		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
536 				    info->key.u.ipv4.dst) < 0) {
537 			nla_nest_cancel(skb, nest);
538 			return -1;
539 		}
540 
541 		nla_nest_end(skb, nest);
542 	}
543 
544 	return 0;
545 }
546 
nft_tunnel_opts_dump(struct sk_buff * skb,struct nft_tunnel_obj * priv)547 static int nft_tunnel_opts_dump(struct sk_buff *skb,
548 				struct nft_tunnel_obj *priv)
549 {
550 	struct nft_tunnel_opts *opts = &priv->opts;
551 	struct nlattr *nest, *inner;
552 
553 	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
554 	if (!nest)
555 		return -1;
556 
557 	if (opts->flags & TUNNEL_VXLAN_OPT) {
558 		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
559 		if (!inner)
560 			goto failure;
561 		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
562 				 htonl(opts->u.vxlan.gbp)))
563 			goto inner_failure;
564 		nla_nest_end(skb, inner);
565 	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
566 		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
567 		if (!inner)
568 			goto failure;
569 		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
570 				 htonl(opts->u.erspan.version)))
571 			goto inner_failure;
572 		switch (opts->u.erspan.version) {
573 		case ERSPAN_VERSION:
574 			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
575 					 opts->u.erspan.u.index))
576 				goto inner_failure;
577 			break;
578 		case ERSPAN_VERSION2:
579 			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
580 				       get_hwid(&opts->u.erspan.u.md2)) ||
581 			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
582 				       opts->u.erspan.u.md2.dir))
583 				goto inner_failure;
584 			break;
585 		}
586 		nla_nest_end(skb, inner);
587 	} else if (opts->flags & TUNNEL_GENEVE_OPT) {
588 		struct geneve_opt *opt;
589 		int offset = 0;
590 
591 		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
592 		if (!inner)
593 			goto failure;
594 		while (opts->len > offset) {
595 			opt = (struct geneve_opt *)opts->u.data + offset;
596 			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
597 					 opt->opt_class) ||
598 			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
599 				       opt->type) ||
600 			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
601 				    opt->length * 4, opt->opt_data))
602 				goto inner_failure;
603 			offset += sizeof(*opt) + opt->length * 4;
604 		}
605 		nla_nest_end(skb, inner);
606 	}
607 	nla_nest_end(skb, nest);
608 	return 0;
609 
610 inner_failure:
611 	nla_nest_cancel(skb, inner);
612 failure:
613 	nla_nest_cancel(skb, nest);
614 	return -1;
615 }
616 
nft_tunnel_ports_dump(struct sk_buff * skb,struct ip_tunnel_info * info)617 static int nft_tunnel_ports_dump(struct sk_buff *skb,
618 				 struct ip_tunnel_info *info)
619 {
620 	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
621 	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
622 		return -1;
623 
624 	return 0;
625 }
626 
nft_tunnel_flags_dump(struct sk_buff * skb,struct ip_tunnel_info * info)627 static int nft_tunnel_flags_dump(struct sk_buff *skb,
628 				 struct ip_tunnel_info *info)
629 {
630 	u32 flags = 0;
631 
632 	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
633 		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
634 	if (!(info->key.tun_flags & TUNNEL_CSUM))
635 		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
636 	if (info->key.tun_flags & TUNNEL_SEQ)
637 		flags |= NFT_TUNNEL_F_SEQ_NUMBER;
638 
639 	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
640 		return -1;
641 
642 	return 0;
643 }
644 
/* Dump the full tunnel object state from the stored metadata dst. */
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)))
		return -1;
	if (nft_tunnel_ip_dump(skb, info) < 0)
		return -1;
	if (nft_tunnel_ports_dump(skb, info) < 0)
		return -1;
	if (nft_tunnel_flags_dump(skb, info) < 0)
		return -1;
	if (nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl))
		return -1;
	if (nft_tunnel_opts_dump(skb, priv) < 0)
		return -1;

	return 0;
}
666 
/* Release the metadata dst allocated by nft_tunnel_obj_init(). */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}
674 
/* Forward declaration: the ops table below points back at the type. */
static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};
684 
/* Tunnel objects are only available in the netdev family. */
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};
693 
nft_tunnel_module_init(void)694 static int __init nft_tunnel_module_init(void)
695 {
696 	int err;
697 
698 	err = nft_register_expr(&nft_tunnel_type);
699 	if (err < 0)
700 		return err;
701 
702 	err = nft_register_obj(&nft_tunnel_obj_type);
703 	if (err < 0)
704 		nft_unregister_expr(&nft_tunnel_type);
705 
706 	return err;
707 }
708 
/* Unregister in reverse order of registration. */
static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}
714 
715 module_init(nft_tunnel_module_init);
716 module_exit(nft_tunnel_module_exit);
717 
718 MODULE_LICENSE("GPL");
719 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
720 MODULE_ALIAS_NFT_EXPR("tunnel");
721 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
722 MODULE_DESCRIPTION("nftables tunnel expression support");
723