/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

extern unsigned int nf_tables_net_id;

nft_flow_rule_alloc(int num_actions)12 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
13 {
14 	struct nft_flow_rule *flow;
15 
16 	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
17 	if (!flow)
18 		return NULL;
19 
20 	flow->rule = flow_rule_alloc(num_actions);
21 	if (!flow->rule) {
22 		kfree(flow);
23 		return NULL;
24 	}
25 
26 	flow->rule->match.dissector	= &flow->match.dissector;
27 	flow->rule->match.mask		= &flow->match.mask;
28 	flow->rule->match.key		= &flow->match.key;
29 
30 	return flow;
31 }
32 
nft_flow_rule_set_addr_type(struct nft_flow_rule * flow,enum flow_dissector_key_id addr_type)33 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
34 				 enum flow_dissector_key_id addr_type)
35 {
36 	struct nft_flow_match *match = &flow->match;
37 	struct nft_flow_key *mask = &match->mask;
38 	struct nft_flow_key *key = &match->key;
39 
40 	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
41 		return;
42 
43 	key->control.addr_type = addr_type;
44 	mask->control.addr_type = 0xffff;
45 	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
46 	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
47 		offsetof(struct nft_flow_key, control);
48 }
49 
/* Saved EtherType value/mask pair, used while rotating n_proto and the
 * vlan/cvlan TPID fields in nft_flow_rule_transfer_vlan(). */
struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};
54 
/* Rotate the parsed EtherType through the vlan/cvlan keys once all
 * expressions have been translated, so the match layout presents the
 * outermost protocol in basic.n_proto and the tag protocols in the
 * vlan/cvlan keys (the order flower-style drivers consume — NOTE(review):
 * inferred from the rotation below, confirm against flow dissector users).
 */
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	/* Stash the parsed n_proto before it is overwritten below. */
	struct nft_offload_ethertype ethertype = {
		.value	= match->key.basic.n_proto,
		.mask	= match->mask.basic.n_proto,
	};

	/* QinQ: the vlan key already holds an 802.1Q/802.1AD TPID, so the
	 * inner tag must move to the cvlan key; rotate all three levels. */
	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		/* Single tag: swap n_proto with the vlan TPID. */
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
	}
}
88 
/* Translate an nft rule into a flow_rule suitable for hardware offload.
 *
 * First pass counts the expressions that emit an offload action so the
 * flow_rule can be sized; second pass runs each expression's ->offload()
 * callback to populate match keys and actions.  The VLAN fix-up runs once
 * all expressions have been processed.
 *
 * Returns the new flow rule, ERR_PTR(-EOPNOTSUPP) when the rule has no
 * offloadable action or contains a non-offloadable expression, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	/* Pass 1: count offloadable actions to size the flow_rule. */
	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_action &&
		    expr->ops->offload_action(expr))
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;	/* kfree(NULL) in the error path is a no-op */
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	/* Pass 2: let each expression fill in its match keys/actions. */
	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	/* L3 protocol collected via the dependency tracking callbacks. */
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}
146 
nft_flow_rule_destroy(struct nft_flow_rule * flow)147 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
148 {
149 	struct flow_action_entry *entry;
150 	int i;
151 
152 	flow_action_for_each(i, entry, &flow->rule->action) {
153 		switch (entry->id) {
154 		case FLOW_ACTION_REDIRECT:
155 		case FLOW_ACTION_MIRRED:
156 			dev_put(entry->dev);
157 			break;
158 		default:
159 			break;
160 		}
161 	}
162 	kfree(flow->rule);
163 	kfree(flow);
164 }
165 
/* Declare the dependency type that a following call to
 * nft_offload_update_dependency() will resolve.
 */
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}
171 
/* Resolve the previously declared dependency with the value matched by
 * the current expression: two bytes into dep.l3num for a network
 * dependency, one byte into dep.protonum for a transport dependency.
 * The dependency type is reset once consumed.
 */
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	/* Dependency is single-shot: clear it after consumption. */
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}
189 
/* Fill in the fields shared by all flower classifier offload requests. */
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}
198 
nft_setup_cb_call(enum tc_setup_type type,void * type_data,struct list_head * cb_list)199 static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
200 			     struct list_head *cb_list)
201 {
202 	struct flow_block_cb *block_cb;
203 	int err;
204 
205 	list_for_each_entry(block_cb, cb_list, list) {
206 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
207 		if (err < 0)
208 			return err;
209 	}
210 	return 0;
211 }
212 
nft_chain_offload_priority(const struct nft_base_chain * basechain)213 static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
214 {
215 	if (basechain->ops.priority <= 0 ||
216 	    basechain->ops.priority > USHRT_MAX)
217 		return -1;
218 
219 	return 0;
220 }
221 
nft_chain_offload_support(const struct nft_base_chain * basechain)222 bool nft_chain_offload_support(const struct nft_base_chain *basechain)
223 {
224 	struct net_device *dev;
225 	struct nft_hook *hook;
226 
227 	if (nft_chain_offload_priority(basechain) < 0)
228 		return false;
229 
230 	list_for_each_entry(hook, &basechain->hook_list, list) {
231 		if (hook->ops.pf != NFPROTO_NETDEV ||
232 		    hook->ops.hooknum != NF_NETDEV_INGRESS)
233 			return false;
234 
235 		dev = hook->ops.dev;
236 		if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists())
237 			return false;
238 	}
239 
240 	return true;
241 }
242 
nft_flow_cls_offload_setup(struct flow_cls_offload * cls_flow,const struct nft_base_chain * basechain,const struct nft_rule * rule,const struct nft_flow_rule * flow,struct netlink_ext_ack * extack,enum flow_cls_command command)243 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
244 				       const struct nft_base_chain *basechain,
245 				       const struct nft_rule *rule,
246 				       const struct nft_flow_rule *flow,
247 				       struct netlink_ext_ack *extack,
248 				       enum flow_cls_command command)
249 {
250 	__be16 proto = ETH_P_ALL;
251 
252 	memset(cls_flow, 0, sizeof(*cls_flow));
253 
254 	if (flow)
255 		proto = flow->proto;
256 
257 	nft_flow_offload_common_init(&cls_flow->common, proto,
258 				     basechain->ops.priority, extack);
259 	cls_flow->command = command;
260 	cls_flow->cookie = (unsigned long) rule;
261 	if (flow)
262 		cls_flow->rule = flow->rule;
263 }
264 
nft_flow_offload_rule(struct nft_chain * chain,struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command)265 static int nft_flow_offload_rule(struct nft_chain *chain,
266 				 struct nft_rule *rule,
267 				 struct nft_flow_rule *flow,
268 				 enum flow_cls_command command)
269 {
270 	struct netlink_ext_ack extack = {};
271 	struct flow_cls_offload cls_flow;
272 	struct nft_base_chain *basechain;
273 
274 	if (!nft_is_base_chain(chain))
275 		return -EOPNOTSUPP;
276 
277 	basechain = nft_base_chain(chain);
278 	nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
279 				   command);
280 
281 	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
282 				 &basechain->flow_block.cb_list);
283 }
284 
/* Adopt the driver callbacks collected during block setup: splice them
 * onto the chain's own flow block so later rule offload requests reach
 * the drivers.
 */
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}
291 
nft_flow_offload_unbind(struct flow_block_offload * bo,struct nft_base_chain * basechain)292 static int nft_flow_offload_unbind(struct flow_block_offload *bo,
293 				   struct nft_base_chain *basechain)
294 {
295 	struct flow_block_cb *block_cb, *next;
296 	struct flow_cls_offload cls_flow;
297 	struct netlink_ext_ack extack;
298 	struct nft_chain *chain;
299 	struct nft_rule *rule;
300 
301 	chain = &basechain->chain;
302 	list_for_each_entry(rule, &chain->rules, list) {
303 		memset(&extack, 0, sizeof(extack));
304 		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
305 					   &extack, FLOW_CLS_DESTROY);
306 		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
307 	}
308 
309 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
310 		list_del(&block_cb->list);
311 		flow_block_cb_free(block_cb);
312 	}
313 
314 	return 0;
315 }
316 
nft_block_setup(struct nft_base_chain * basechain,struct flow_block_offload * bo,enum flow_block_command cmd)317 static int nft_block_setup(struct nft_base_chain *basechain,
318 			   struct flow_block_offload *bo,
319 			   enum flow_block_command cmd)
320 {
321 	int err;
322 
323 	switch (cmd) {
324 	case FLOW_BLOCK_BIND:
325 		err = nft_flow_offload_bind(bo, basechain);
326 		break;
327 	case FLOW_BLOCK_UNBIND:
328 		err = nft_flow_offload_unbind(bo, basechain);
329 		break;
330 	default:
331 		WARN_ON_ONCE(1);
332 		err = -EOPNOTSUPP;
333 	}
334 
335 	return err;
336 }
337 
/* Initialize a flow block bind/unbind request targeting @basechain's
 * flow block on ingress.  The callback list starts empty; the driver
 * populates it during block setup.
 */
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net		= net;
	bo->block	= &basechain->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	bo->cb_list_head = &basechain->flow_block.cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}
353 
nft_block_offload_cmd(struct nft_base_chain * chain,struct net_device * dev,enum flow_block_command cmd)354 static int nft_block_offload_cmd(struct nft_base_chain *chain,
355 				 struct net_device *dev,
356 				 enum flow_block_command cmd)
357 {
358 	struct netlink_ext_ack extack = {};
359 	struct flow_block_offload bo;
360 	int err;
361 
362 	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
363 
364 	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
365 	if (err < 0)
366 		return err;
367 
368 	return nft_block_setup(chain, &bo, cmd);
369 }
370 
/* Cleanup callback invoked by the indirect block infrastructure when the
 * driver behind an indirect binding goes away: unbind the chain's
 * offloaded rules under the nftables commit mutex.
 */
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = net_generic(net, nf_tables_net_id);
	/* commit_mutex serializes against the transaction commit path. */
	mutex_lock(&nft_net->commit_mutex);
	/* Detach this callback from the driver list and hand it to
	 * unbind, which destroys the offloaded rules and frees it. */
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}
389 
/* Bind/unbind via the indirect block infrastructure, used when the
 * device does not implement ->ndo_setup_tc.  If no driver picks up the
 * block (empty callback list), report -EOPNOTSUPP.
 */
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}
410 
nft_chain_offload_cmd(struct nft_base_chain * basechain,struct net_device * dev,enum flow_block_command cmd)411 static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
412 				 struct net_device *dev,
413 				 enum flow_block_command cmd)
414 {
415 	int err;
416 
417 	if (dev->netdev_ops->ndo_setup_tc)
418 		err = nft_block_offload_cmd(basechain, dev, cmd);
419 	else
420 		err = nft_indr_block_offload_cmd(basechain, dev, cmd);
421 
422 	return err;
423 }
424 
/* Run a block bind/unbind command on the devices hooked by @basechain.
 * @this_dev restricts the operation to one device; NULL means all hooks.
 *
 * A failed bind across all hooks unwinds the devices already bound; a
 * failed bind on a single named device is returned as-is.
 */
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;	/* i counts devices successfully processed */

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	/* Unbind the first i devices that were bound above. */
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}
460 
/* Bind or unbind the flow block of a base chain.  @ppolicy optionally
 * overrides the chain's stored policy (used by the commit path, where
 * the policy lives in the transaction).  Binding a drop-by-default
 * chain is rejected.
 */
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}
479 
/* Roll back the hardware offload requests already issued by the commit
 * path, walking the transaction list backwards from the failing entry:
 * chains bound on commit are unbound (and vice versa), rules added are
 * destroyed and rules deleted are re-installed.
 */
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			/* Revert the bind performed on commit. */
			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* The chain was unbound on commit: bind it again. */
			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Remove the rule installed on commit. */
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Re-install the rule removed on commit. */
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}
529 
/* Push the offload requests for the current transaction batch to the
 * drivers.  Chains flagged NFT_CHAIN_HW_OFFLOAD are (un)bound and their
 * rule updates translated into flower classifier commands; on the first
 * failure the already-applied operations are rolled back via
 * nft_flow_rule_offload_abort().  A second pass releases the
 * software-side flow rule translations held by the transactions.
 *
 * Returns 0 on success or the first offload error encountered.
 */
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Only appended rules are offloaded: replace or
			 * insert at an arbitrary position is rejected. */
			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	/* Release the flow rule translations carried by the batch. */
	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}
608 
__nft_offload_get_chain(const struct nftables_pernet * nft_net,struct net_device * dev)609 static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
610 						 struct net_device *dev)
611 {
612 	struct nft_base_chain *basechain;
613 	struct nft_hook *hook, *found;
614 	const struct nft_table *table;
615 	struct nft_chain *chain;
616 
617 	list_for_each_entry(table, &nft_net->tables, list) {
618 		if (table->family != NFPROTO_NETDEV)
619 			continue;
620 
621 		list_for_each_entry(chain, &table->chains, list) {
622 			if (!nft_is_base_chain(chain) ||
623 			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
624 				continue;
625 
626 			found = NULL;
627 			basechain = nft_base_chain(chain);
628 			list_for_each_entry(hook, &basechain->hook_list, list) {
629 				if (hook->ops.dev != dev)
630 					continue;
631 
632 				found = hook;
633 				break;
634 			}
635 			if (!found)
636 				continue;
637 
638 			return chain;
639 		}
640 	}
641 
642 	return NULL;
643 }
644 
/* Netdevice notifier: on NETDEV_UNREGISTER, unbind the flow block of an
 * offloaded chain hooked on the vanishing device, if any.
 */
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	nft_net = net_generic(net, nf_tables_net_id);
	/* commit_mutex serializes against the transaction commit path. */
	mutex_lock(&nft_net->commit_mutex);
	chain = __nft_offload_get_chain(nft_net, dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&nft_net->commit_mutex);

	return NOTIFY_DONE;
}
667 
/* Drops offload state for chains whose ingress device goes away. */
static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};
671 
/* Register the netdevice notifier used to clean up offloaded chains. */
int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}
676 
/* Unregister the netdevice notifier on module teardown. */
void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
681