/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_head rcu;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

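/* Find the filter result for @key: index directly into the perfect hash if
 * one exists, otherwise walk the chain in the imperfect hash bucket
 * selected by key % hash.
 */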
static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

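/* Classify using the packet's tc_index: key = (tc_index & mask) >> shift.
 * When no filter matches and fall_through is set, synthesize a class id
 * from the qdisc's major handle and the key instead of failing the lookup.
 */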
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}


static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
	pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}


static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

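/* Remove a filter result.  In the perfect hash the slot stays in place and
 * only its class binding and extensions are released; in the imperfect hash
 * the matching entry is unlinked from its chain and freed after an RCU
 * grace period.
 */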
static int
tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	tcf_exts_destroy(&r->exts);
	if (f)
		kfree_rcu(f, rcu);
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   unsigned long arg,
				   struct tcf_walker *walker)
{
	return tcindex_delete(tp, arg);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

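/* A perfect hash is usable only if the table has a slot for every value the
 * masked and shifted tc_index can take, i.e. hash > (mask >> shift).
 */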
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static void tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

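/* Apply new parameters by building a copy of the tcindex_data, filling it
 * from the netlink attributes, publishing it with rcu_assign_pointer() and
 * freeing the old copy after a grace period, so concurrent lookups always
 * see a consistent configuration.
 */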
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	int err, balloc = 0;
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		cp->perfect = kmemdup(p->perfect,
				      sizeof(*r) * cp->hash, GFP_KERNEL);
		if (!cp->perfect)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			tcf_exts_init(&cp->perfect[i].exts,
				      TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		balloc = 1;
	}
	cp->h = p->h;

	tcindex_filter_result_init(&new_filter_result);
	tcindex_filter_result_init(&cr);
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			int i;

			cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL);
			if (!cp->perfect)
				goto errout_alloc;
			for (i = 0; i < cp->hash; i++)
				tcf_exts_init(&cp->perfect[i].exts,
					      TCA_TCINDEX_ACT,
					      TCA_TCINDEX_POLICE);
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		tcindex_filter_result_init(&f->result);
		f->next = NULL;
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(tp, &r->exts, &e);
	else
		tcf_exts_change(tp, &cr.exts, &e);

	if (old_r && old_r != r)
		tcindex_filter_result_init(old_r);

	oldp = p;
	r->res = cr.res;
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(tp, &f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		kfree(cp->perfect);
	else if (balloc == 2)
		kfree(cp->h);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg 0x%lx\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

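/* Iterate over all set entries in the perfect hash and then over every chain
 * of the imperfect hash, invoking walker->fn on each entry and honouring the
 * walker's skip count and stop flag.
 */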
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect+i), walker)
				     < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
				    walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	RCU_INIT_POINTER(tp->root, NULL);
	call_rcu(&p->rcu, __tcindex_destroy);
}


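/* Dump either the classifier parameters (fh == 0: hash size, mask, shift,
 * fall_through) or a single filter result, recovering its handle from the
 * slot position in the perfect hash or from the entry's key in the
 * imperfect hash.
 */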
static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
    struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
		 tp, fh, skb, t, p, r, b);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.put		=	tcindex_put,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");