/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
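
/* A minimal usage sketch for flow_rule_alloc(); the two-action rule and the
 * hw_stats override shown here are hypothetical:
 *
 *	struct flow_rule *rule = flow_rule_alloc(2);
 *
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
 *	...
 *	kfree(rule);
 */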

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

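/* Each flow_rule_match_*() helper below expands FLOW_DISSECTOR_MATCH() to
 * point @out->key and @out->mask at the corresponding dissector key region
 * inside the rule. A minimal consumer sketch, assuming a driver first tests
 * for the key with flow_rule_match_key(); program_tcp_filter() is
 * hypothetical:
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->ip_proto == 0xff &&
 *		    match.key->ip_proto == IPPROTO_TCP)
 *			program_tcp_filter();
 *	}
 */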
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
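
/* A short sketch of the cookie helpers above: a caller's cookie blob is
 * copied into a freshly allocated flow_action_cookie and released with the
 * destroy helper. The 8-byte blob here is hypothetical.
 *
 *	u8 blob[8] = { 0 };
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(blob, sizeof(blob), GFP_KERNEL);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */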

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);
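
/* A sketch of the alloc/free pair above: a driver allocates a block callback
 * with an optional release hook that tears down its private state when the
 * callback is freed. The my_* names are hypothetical.
 *
 *	block_cb = flow_block_cb_alloc(my_block_cb, my_priv, my_priv,
 *				       my_release);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	...
 *	flow_block_cb_free(block_cb);	calls my_release(my_priv)
 */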

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
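
/* A minimal sketch of how a driver's ndo_setup_tc() typically wires a TC
 * block offload to flow_block_cb_setup_simple(); the my_* names and the
 * per-driver block list are hypothetical.
 *
 *	static LIST_HEAD(my_block_cb_list);
 *
 *	static int my_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *			       void *type_data)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (type != TC_SETUP_BLOCK)
 *			return -EOPNOTSUPP;
 *		return flow_block_cb_setup_simple(type_data, &my_block_cb_list,
 *						  my_setup_block_cb, priv, priv,
 *						  true);
 *	}
 */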

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

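/* One registered driver callback for indirect block offloads; refcounted so
 * that registering the same cb/cb_priv pair twice shares a single entry on
 * flow_block_indr_dev_list.
 */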
struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

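/* Bookkeeping for one device/qdisc that requested an indirect block setup;
 * kept on flow_indir_dev_list so the request can be replayed to driver
 * callbacks that register later (see existing_qdiscs_register()).
 */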
struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

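/* Replay every recorded indirect block request to a newly registered driver
 * callback, so a driver that loads after the device/qdisc was set up still
 * gets a chance to bind to it. Called with flow_indr_block_lock held.
 */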
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
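
/* A sketch of the register/unregister pair, assuming a driver registers one
 * indirect bind callback at probe time and unregisters it on remove; the
 * my_* names are hypothetical, and my_release must match the release hook
 * the driver passed to its indirect block callbacks so they can be cleaned
 * up on unregister.
 *
 *	err = flow_indr_dev_register(my_indr_setup_cb, my_priv);
 *	if (err)
 *		return err;
 *	...
 *	flow_indr_dev_unregister(my_indr_setup_cb, my_priv, my_release);
 */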

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
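
/* A sketch of how an indirect bind callback might consume the allocator
 * above on FLOW_BLOCK_BIND; error handling is trimmed and the my_* names
 * are hypothetical.
 *
 *	block_cb = flow_indr_block_cb_alloc(my_setup_block_cb, my_priv,
 *					    my_priv, my_release, bo, dev, sch,
 *					    data, indr_priv, cleanup);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	flow_block_cb_add(block_cb, bo);
 */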

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

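/* Fan an indirect block setup request out to every registered driver
 * callback, recording BIND requests (and dropping the record again on
 * UNBIND) so they can be replayed to late-registering drivers. Returns
 * -EOPNOTSUPP if no callback claimed the block.
 */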
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);

	if (bo->command == FLOW_BLOCK_BIND)
		indir_dev_add(data, dev, sch, type, cleanup, bo);
	else if (bo->command == FLOW_BLOCK_UNBIND)
		indir_dev_remove(data);

	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);