// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
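
/*
 * This classifier computes key = (skb->tc_index & mask) >> shift and maps
 * that key to a class and optional actions. An illustrative userspace
 * setup (device, handles and mask are examples only), e.g. classifying on
 * DSCP bits previously stored into tc_index:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex classid 1:1
 */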

struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

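/*
 * Look up the filter result for @key: either index the perfect hash array
 * directly, or search the chain of the key's imperfect hash bucket.
 */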
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


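/*
 * Classify a packet: derive the key from skb->tc_index and look it up. If
 * nothing matches and fall_through is set, synthesize a classid from the
 * qdisc's major handle and the key itself.
 */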
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


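/*
 * Map a handle to its filter result. For the perfect hash the handle is
 * the array index, so out-of-range handles are rejected against alloc_hash.
 */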
static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

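/*
 * Unlink and destroy one filter. An imperfect-hash filter is removed from
 * its bucket chain; a perfect-hash slot is only cleared. The extensions
 * are destroyed after an RCU grace period via tcf_queue_work(), since
 * actions rely on that in their cleanup() callback.
 */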
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

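/*
 * A perfect hash is usable only if every possible key can index the table
 * directly, i.e. p->hash is strictly greater than the largest possible
 * key, (mask >> shift).
 */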
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

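/*
 * Apply new parameters. A fresh tcindex_data is allocated, populated from
 * the old one and the netlink attributes, and RCU-swapped onto tp->root so
 * concurrent lookups always see a consistent state; the old data is freed
 * after a grace period via tcindex_partial_destroy_work().
 */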
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;
	bool update_h = false;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT]) {
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
		if (cp->shift > 16) {
			err = -EINVAL;
			goto errout;
		}
	}
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect) {
		r = cp->perfect + handle;
	} else {
		/* imperfect area is updated in-place using rcu */
		update_h = !!tcindex_lookup(cp, handle);
		r = &new_filter_result;
	}

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (update_h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *cf;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		/* imperfect area bucket */
		fp = cp->h + (handle % cp->hash);

		/* lookup the filter, guaranteed to exist */
		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
			if (cf->key == handle)
				break;

		f->next = cf->next;

		cf = rcu_replace_pointer(*fp, f, 1);
		tcf_exts_get_net(&cf->result.exts);
		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
	} else if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

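/*
 * Iterate over all installed filters for dumping, honouring the walker's
 * skip/count bookkeeping. Only perfect-hash slots that are bound to a
 * class are reported.
 */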
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

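/*
 * Tear down the whole classifier instance. Perfect-hash results take a
 * temporary reference on the tcindex_data so it outlives all queued
 * destroy work (see the comment in the loop below); imperfect-hash
 * entries are removed through tcindex_delete().
 */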
static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


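/*
 * Dump either the classifier parameters (fh == NULL) or a single filter.
 * For an imperfect hash the handle has to be recovered by searching the
 * buckets for the matching result.
 */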
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

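/*
 * Update the class binding of a filter result that points at @classid when
 * the class is bound (cl != 0) or unbound, keeping the class' filter
 * refcount accurate.
 */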
static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");