// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>

struct nft_flow_offload {
	struct nft_flowtable	*flowtable;
};

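/* Choose the transmit path for this dst: packets with an attached xfrm
 * state go through the IPsec transform path, everything else is sent
 * via the neighbour layer.
 */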
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
	if (dst_xfrm(dst))
		return FLOW_OFFLOAD_XMIT_XFRM;

	return FLOW_OFFLOAD_XMIT_NEIGH;
}

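/* Fill in the route tuple from a dst entry: record the incoming ifindex
 * for the reply direction, and the dst and xmit type for this direction.
 */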
static void nft_default_forward_path(struct nf_flow_route *route,
				     struct dst_entry *dst_cache,
				     enum ip_conntrack_dir dir)
{
	route->tuple[!dir].in.ifindex	= dst_cache->dev->ifindex;
	route->tuple[dir].dst		= dst_cache;
	route->tuple[dir].xmit_type	= nft_xmit_type(dst_cache);
}

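/* Only plain Ethernet devices with a valid unicast MAC address and
 * without the loopback flag are eligible for the direct xmit path.
 */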
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
		return false;

	return true;
}

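/* Resolve the next-hop hardware address through a neighbour lookup,
 * then walk the lower device stack (VLAN, PPPoE, bridge, DSA) with
 * dev_fill_forward_path() to find the real transmit device.
 */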
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
				     const struct dst_entry *dst_cache,
				     const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, u8 *ha,
				     struct net_device_path_stack *stack)
{
	const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
	struct net_device *dev = dst_cache->dev;
	struct neighbour *n;
	u8 nud_state;

	if (!nft_is_valid_ether_device(dev))
		goto out;

	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -1;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);
	neigh_release(n);

	if (!(nud_state & NUD_VALID))
		return -1;

out:
	return dev_fill_forward_path(dev, ha, stack);
}

struct nft_forward_info {
	const struct net_device *indev;
	const struct net_device *outdev;
	const struct net_device *hw_outdev;
	struct id {
		__u16	id;
		__be16	proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];
	u8 num_encaps;
	u8 ingress_vlans;
	u8 h_source[ETH_ALEN];
	u8 h_dest[ETH_ALEN];
	enum flow_offload_xmit_type xmit_type;
};

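/* Condense the device path stack into a nft_forward_info: pick up the
 * source and destination MAC addresses, collect VLAN/PPPoE
 * encapsulation tags and bridge VLAN operations, and decide whether
 * the flow qualifies for the direct xmit type.
 */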
static void nft_dev_path_info(const struct net_device_path_stack *stack,
			      struct nft_forward_info *info,
			      unsigned char *ha, struct nf_flowtable *flowtable)
{
	const struct net_device_path *path;
	int i;

	memcpy(info->h_dest, ha, ETH_ALEN);

	for (i = 0; i < stack->num_paths; i++) {
		path = &stack->path[i];
		switch (path->type) {
		case DEV_PATH_ETHERNET:
		case DEV_PATH_DSA:
		case DEV_PATH_VLAN:
		case DEV_PATH_PPPOE:
			info->indev = path->dev;
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			if (path->type == DEV_PATH_ETHERNET)
				break;
			if (path->type == DEV_PATH_DSA) {
				/* DSA: stop walking the path stack, this is
				 * the device to transmit from.
				 */
				i = stack->num_paths;
				break;
			}

			/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
			if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
				info->indev = NULL;
				break;
			}
			if (!info->outdev)
				info->outdev = path->dev;
			info->encap[info->num_encaps].id = path->encap.id;
			info->encap[info->num_encaps].proto = path->encap.proto;
			info->num_encaps++;
			if (path->type == DEV_PATH_PPPOE)
				memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
			break;
		case DEV_PATH_BRIDGE:
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			switch (path->bridge.vlan_mode) {
			case DEV_PATH_BR_VLAN_UNTAG_HW:
				info->ingress_vlans |= BIT(info->num_encaps - 1);
				break;
			case DEV_PATH_BR_VLAN_TAG:
				info->encap[info->num_encaps].id = path->bridge.vlan_id;
				info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
				info->num_encaps++;
				break;
			case DEV_PATH_BR_VLAN_UNTAG:
				info->num_encaps--;
				break;
			case DEV_PATH_BR_VLAN_KEEP:
				break;
			}
			info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
			break;
		default:
			info->indev = NULL;
			break;
		}
	}
	if (!info->outdev)
		info->outdev = info->indev;

	info->hw_outdev = info->indev;

	if (nf_flowtable_hw_offload(flowtable) &&
	    nft_is_valid_ether_device(info->indev))
		info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}

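/* Return true if the given device is attached to the flowtable's hook
 * list.
 */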
static bool nft_flowtable_find_dev(const struct net_device *dev,
				   struct nft_flowtable *ft)
{
	struct nft_hook *hook;
	bool found = false;

	list_for_each_entry_rcu(hook, &ft->hook_list, list) {
		if (hook->ops.dev != dev)
			continue;

		found = true;
		break;
	}

	return found;
}

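/* Resolve the real forward path for one direction of the flow and, if
 * the ingress device belongs to the flowtable, store the ifindex, the
 * encapsulation tags and, for the direct xmit path, the MAC addresses
 * in the route.
 */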
static void nft_dev_forward_path(struct nf_flow_route *route,
				 const struct nf_conn *ct,
				 enum ip_conntrack_dir dir,
				 struct nft_flowtable *ft)
{
	const struct dst_entry *dst = route->tuple[dir].dst;
	struct net_device_path_stack stack;
	struct nft_forward_info info = {};
	unsigned char ha[ETH_ALEN];
	int i;

	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
		nft_dev_path_info(&stack, &info, ha, &ft->data);

	if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
		return;

	route->tuple[!dir].in.ifindex = info.indev->ifindex;
	for (i = 0; i < info.num_encaps; i++) {
		route->tuple[!dir].in.encap[i].id = info.encap[i].id;
		route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
	}
	route->tuple[!dir].in.num_encaps = info.num_encaps;
	route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;

	if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
		route->tuple[dir].out.ifindex = info.outdev->ifindex;
		route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
		route->tuple[dir].xmit_type = info.xmit_type;
	}
}

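/* Set up the route object for both flow directions: reuse the dst
 * already attached to the packet for @dir, look up the dst for the
 * reply direction, and refine both to the real forward path when the
 * default neighbour xmit type is in use.
 */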
static int nft_flow_route(const struct nft_pktinfo *pkt,
			  const struct nf_conn *ct,
			  struct nf_flow_route *route,
			  enum ip_conntrack_dir dir,
			  struct nft_flowtable *ft)
{
	struct dst_entry *this_dst = skb_dst(pkt->skb);
	struct dst_entry *other_dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
		break;
	}

	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
	if (!other_dst)
		return -ENOENT;

	nft_default_forward_path(route, this_dst, dir);
	nft_default_forward_path(route, other_dst, !dir);

	if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
	    route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
		nft_dev_forward_path(route, ct, dir, ft);
		nft_dev_forward_path(route, ct, !dir, ft);
	}

	return 0;
}

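/* Packets with an IPsec security path or with IPv4 options cannot be
 * offloaded; keep them on the classic forwarding path.
 */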
static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
{
	if (skb_sec_path(skb))
		return true;

	if (family == NFPROTO_IPV4) {
		const struct ip_options *opt;

		opt = &(IPCB(skb)->opt);

		if (unlikely(opt->optlen))
			return true;
	}

	return false;
}

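/* Expression evaluation: offload an established TCP or UDP connection
 * into the flowtable. Conntrack entries that use helpers, need
 * sequence number adjustment or clashed during NAT are left alone. If
 * the flow cannot be set up, the expression returns NFT_BREAK and the
 * packet stays on the regular forwarding path.
 */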
static void nft_flow_offload_eval(const struct nft_expr *expr,
				  struct nft_regs *regs,
				  const struct nft_pktinfo *pkt)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	struct nf_flowtable *flowtable = &priv->flowtable->data;
	struct tcphdr _tcph, *tcph = NULL;
	struct nf_flow_route route = {};
	enum ip_conntrack_info ctinfo;
	struct flow_offload *flow;
	enum ip_conntrack_dir dir;
	struct nf_conn *ct;
	int ret;

	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
		goto out;

	ct = nf_ct_get(pkt->skb, &ctinfo);
	if (!ct)
		goto out;

	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
	case IPPROTO_TCP:
		tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
					  sizeof(_tcph), &_tcph);
		if (unlikely(!tcph || tcph->fin || tcph->rst ||
			     !nf_conntrack_tcp_established(ct)))
			goto out;
		break;
	case IPPROTO_UDP:
		break;
	default:
		goto out;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
		goto out;

	if (!nf_ct_is_confirmed(ct))
		goto out;

	/* only one packet sets up the flow for this conntrack entry */
	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
		goto err_flow_route;

	flow = flow_offload_alloc(ct);
	if (!flow)
		goto err_flow_alloc;

	if (flow_offload_route_init(flow, &route) < 0)
		goto err_flow_add;

	if (tcph) {
		/* offloaded packets bypass TCP window tracking; be liberal
		 * in case packets hit the slow path again.
		 */
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	ret = flow_offload_add(flowtable, flow);
	if (ret < 0)
		goto err_flow_add;

	dst_release(route.tuple[!dir].dst);
	return;

err_flow_add:
	flow_offload_free(flow);
err_flow_alloc:
	dst_release(route.tuple[!dir].dst);
err_flow_route:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
out:
	regs->verdict.code = NFT_BREAK;
}

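/* The expression is only valid in the forward hook of ipv4, ipv6 and
 * inet family chains.
 */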
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     const struct nft_data **data)
{
	unsigned int hook_mask = (1 << NF_INET_FORWARD);

	if (ctx->family != NFPROTO_IPV4 &&
	    ctx->family != NFPROTO_IPV6 &&
	    ctx->family != NFPROTO_INET)
		return -EOPNOTSUPP;

	return nft_chain_validate_hooks(ctx->chain, hook_mask);
}

static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
				    .len = NFT_NAME_MAXLEN - 1 },
};

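/* Look up the flowtable this expression refers to, bump its use
 * counter and take a reference on conntrack netns support.
 */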
static int nft_flow_offload_init(const struct nft_ctx *ctx,
				 const struct nft_expr *expr,
				 const struct nlattr * const tb[])
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	u8 genmask = nft_genmask_next(ctx->net);
	struct nft_flowtable *flowtable;

	if (!tb[NFTA_FLOW_TABLE_NAME])
		return -EINVAL;

	flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
					 genmask);
	if (IS_ERR(flowtable))
		return PTR_ERR(flowtable);

	if (!nft_use_inc(&flowtable->use))
		return -EMFILE;

	priv->flowtable = flowtable;

	return nf_ct_netns_get(ctx->net, ctx->family);
}

static void nft_flow_offload_deactivate(const struct nft_ctx *ctx,
					const struct nft_expr *expr,
					enum nft_trans_phase phase)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	nf_tables_deactivate_flowtable(ctx, priv->flowtable, phase);
}

static void nft_flow_offload_activate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	nft_use_inc_restore(&priv->flowtable->use);
}

static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
				     const struct nft_expr *expr)
{
	nf_ct_netns_put(ctx->net, ctx->family);
}

static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_flow_offload_type;
static const struct nft_expr_ops nft_flow_offload_ops = {
	.type		= &nft_flow_offload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
	.eval		= nft_flow_offload_eval,
	.init		= nft_flow_offload_init,
	.activate	= nft_flow_offload_activate,
	.deactivate	= nft_flow_offload_deactivate,
	.destroy	= nft_flow_offload_destroy,
	.validate	= nft_flow_offload_validate,
	.dump		= nft_flow_offload_dump,
};

static struct nft_expr_type nft_flow_offload_type __read_mostly = {
	.name		= "flow_offload",
	.ops		= &nft_flow_offload_ops,
	.policy		= nft_flow_offload_policy,
	.maxattr	= NFTA_FLOW_MAX,
	.owner		= THIS_MODULE,
};

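/* Flush all flowtable entries that use a device when it goes down. */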
static int flow_offload_netdev_event(struct notifier_block *this,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	nf_flow_table_cleanup(dev);

	return NOTIFY_DONE;
}

static struct notifier_block flow_offload_netdev_notifier = {
	.notifier_call	= flow_offload_netdev_event,
};

static int __init nft_flow_offload_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&flow_offload_netdev_notifier);
	if (err)
		goto err;

	err = nft_register_expr(&nft_flow_offload_type);
	if (err < 0)
		goto register_expr;

	return 0;

register_expr:
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
err:
	return err;
}

static void __exit nft_flow_offload_module_exit(void)
{
	nft_unregister_expr(&nft_flow_offload_type);
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
}

module_init(nft_flow_offload_module_init);
module_exit(nft_flow_offload_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
MODULE_DESCRIPTION("nftables hardware flow offload module");