Lines Matching refs: cl
213 struct htb_class *cl; in htb_classify() local
224 cl = htb_find(skb->priority, sch); in htb_classify()
225 if (cl) { in htb_classify()
226 if (cl->level == 0) in htb_classify()
227 return cl; in htb_classify()
229 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
247 cl = (void *)res.class; in htb_classify()
248 if (!cl) { in htb_classify()
251 cl = htb_find(res.classid, sch); in htb_classify()
252 if (!cl) in htb_classify()
255 if (!cl->level) in htb_classify()
256 return cl; /* we hit leaf; return it */ in htb_classify()
259 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
262 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
263 if (!cl || cl->level) in htb_classify()
265 return cl; in htb_classify()
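The htb_classify() fragments above encode the lookup order: skb->priority is first tried directly as a classid; if that names an inner class, its filter chain is walked (each hit may name another inner class whose own filter list is consulted next) until a leaf (level 0) is found; otherwise the qdisc's default class q->defcls is used. A minimal user-space model of that walk follows. The names cls, find_class() and run_filters() are stand-ins invented for this sketch, not kernel APIs, and the HTB_DIRECT sentinel and the qdisc-level filter list are elided.

#include <stddef.h>

struct cls { unsigned int classid; int level; struct cls *filter_hit; };

/* toy topology standing in for the qdisc's class hash (htb_find());
 * the classids and the filter verdict are invented for the sketch */
static struct cls leaf = { 0x10010, 0, NULL };
static struct cls root = { 0x10001, 1, &leaf };

static struct cls *find_class(unsigned int classid)
{
	if (classid == root.classid)
		return &root;
	if (classid == leaf.classid)
		return &leaf;
	return NULL;
}

/* one round of a class's filter chain (tcf_classify() in the
 * kernel), reduced here to a precomputed verdict */
static struct cls *run_filters(const struct cls *c)
{
	return c->filter_hit;
}

static struct cls *classify_model(unsigned int skb_priority,
				  unsigned int defcls)
{
	struct cls *cl = find_class(skb_priority);
	int depth;

	if (cl && cl->level == 0)
		return cl;		/* skb->priority named a leaf */

	/* elided: when skb->priority matches nothing, the kernel walks
	 * the qdisc-level filter list instead of a class's; the depth
	 * bound below is defensive, the kernel relies on the filter
	 * graph being finite */
	for (depth = 0; cl && depth < 32; depth++) {
		struct cls *hit = run_filters(cl);

		if (!hit)
			break;
		if (hit->level == 0)
			return hit;	/* we hit leaf; return it */
		cl = hit;		/* inner class: try its filters */
	}
	return find_class(defcls);	/* fall back to the default class */
}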
275 struct htb_class *cl, int prio) in htb_add_to_id_tree() argument
284 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
289 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
290 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
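htb_add_to_id_tree() is the standard kernel rb-tree insertion idiom: descend comparing classids to locate the link pointer, then rb_link_node() splices the node and rb_insert_color() rebalances. The same walk restated over a plain (unbalanced) binary tree, as a sketch:

#include <stddef.h>

struct node { unsigned int classid; struct node *left, *right; };

static void id_tree_insert(struct node **root, struct node *n)
{
	struct node **p = root;

	/* descend to the empty slot this classid sorts into */
	while (*p) {
		if (n->classid > (*p)->classid)
			p = &(*p)->right;	/* bigger ids go right */
		else
			p = &(*p)->left;	/* ties go left, as above */
	}
	n->left = n->right = NULL;
	*p = n;		/* rb_link_node(); the kernel then rebalances
			 * with rb_insert_color() */
}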
301 struct htb_class *cl, s64 delay) in htb_add_to_wait_tree() argument
303 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
305 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
306 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
307 cl->pq_key++; in htb_add_to_wait_tree()
310 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
311 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
317 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
322 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
323 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
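htb_add_to_wait_tree() keys the per-level wait queue on the absolute time at which the class's mode must be re-evaluated, and maintains a per-level nearest-event cache (q->near_ev_cache) so the dequeue path can cheaply skip levels with nothing due. A sketch of just the key bookkeeping; the depth bound of 8 matches TC_HTB_MAXDEPTH:

#include <stdint.h>
typedef int64_t s64;

#define MAXDEPTH 8			/* TC_HTB_MAXDEPTH in the kernel */

/* seed the cache "far away" (range initializer is a GNU extension,
 * used throughout the kernel); htb_do_events() refreshes it */
static s64 near_ev_cache[MAXDEPTH] = {
	[0 ... MAXDEPTH - 1] = INT64_MAX
};

static s64 wait_key(s64 now, s64 delay, int level)
{
	s64 key = now + delay;

	if (key == now)
		key++;	/* a zero delay must still land in the future */
	if (near_ev_cache[level] > key)
		near_ev_cache[level] = key;	/* nearest pending event */
	return key;
}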
344 struct htb_class *cl, int mask) in htb_add_class_to_row() argument
346 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
350 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
373 struct htb_class *cl, int mask) in htb_remove_class_from_row() argument
376 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
383 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
386 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
390 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
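The row fragments keep one bit per priority in q->row_mask[level]: a set bit means at least one class is self-feeding at that priority on that level. Adding a class sets its bits unconditionally, while removal erases the class from each priority's row tree and clears only the bits whose trees actually drained. A sketch with an occupancy counter standing in for the rb-trees:

#define NUMPRIO 8			/* TC_HTB_NUMPRIO in the kernel */
#define MAXDEPTH 8			/* TC_HTB_MAXDEPTH */

static unsigned long row_mask[MAXDEPTH];
static int row_count[MAXDEPTH][NUMPRIO];	/* stand-in for the trees */

static void add_class_to_row(int level, unsigned long mask)
{
	row_mask[level] |= mask;
	/* __builtin_ctzl: index of the lowest set bit (GCC/Clang);
	 * m &= m - 1 clears that bit for the next iteration */
	for (unsigned long m = mask; m; m &= m - 1)
		row_count[level][__builtin_ctzl(m)]++;
}

static void remove_class_from_row(int level, unsigned long mask)
{
	unsigned long emptied = 0;

	for (unsigned long m = mask; m; m &= m - 1) {
		int prio = __builtin_ctzl(m);

		if (--row_count[level][prio] == 0)
			emptied |= 1UL << prio;	/* this tree drained */
	}
	row_mask[level] &= ~emptied;	/* clear only drained prios */
}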
400 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
402 struct htb_class *p = cl->parent; in htb_activate_prios()
403 long m, mask = cl->prio_activity; in htb_activate_prios()
405 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
417 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
420 cl = p; in htb_activate_prios()
421 p = cl->parent; in htb_activate_prios()
424 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
425 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
435 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
437 struct htb_class *p = cl->parent; in htb_deactivate_prios()
438 long m, mask = cl->prio_activity; in htb_deactivate_prios()
440 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
447 if (p->inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
452 p->inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
456 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
464 cl = p; in htb_deactivate_prios()
465 p = cl->parent; in htb_deactivate_prios()
468 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
469 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
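htb_activate_prios() pushes a leaf's newly-active priority mask toward the root: at each borrowing ancestor, a bit drops out of the mask as soon as that ancestor's feed tree for the priority is already populated (everything above is then already wired up), the surviving bits are OR-ed into the parent's prio_activity, and the walk ends at the first class that can send on its own rate, which joins the row instead. A compact model:

enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

struct klass {
	struct klass *parent;
	enum cmode cmode;
	unsigned long prio_activity;	/* bit p: active at prio p */
	int feed_nonempty[8];		/* stand-in for inner feed trees */
};

static void add_class_to_row(struct klass *cl, unsigned long mask)
{
	/* htb_add_class_to_row() in the kernel; elided here */
}

static void activate_prios(struct klass *cl)
{
	struct klass *p = cl->parent;
	unsigned long mask = cl->prio_activity;

	while (cl->cmode == MAY_BORROW && p && mask) {
		/* __builtin_ctzl: lowest set bit (GCC/Clang) */
		for (unsigned long m = mask; m; m &= m - 1) {
			int prio = __builtin_ctzl(m);

			if (p->feed_nonempty[prio])
				mask &= ~(1UL << prio);	/* already fed */
			p->feed_nonempty[prio] = 1;	/* add_to_id_tree */
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == CAN_SEND && mask)
		add_class_to_row(cl, mask);
}

htb_deactivate_prios() mirrors this walk; it additionally parks the departing class's classid in last_ptr_id (lines 447-452) so a later htb_lookup_leaf() can resume the round robin near where it left off.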
472 static inline s64 htb_lowater(const struct htb_class *cl) in htb_lowater() argument
475 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
479 static inline s64 htb_hiwater(const struct htb_class *cl) in htb_hiwater() argument
482 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
500 htb_class_mode(struct htb_class *cl, s64 *diff) in htb_class_mode() argument
504 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
509 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
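htb_class_mode() derives the three-way mode from the two token buckets, with htb_lowater()/htb_hiwater() adding hysteresis by shifting the thresholds based on the current mode (guarded by the htb_hysteresis module parameter; with it off, the default, both bounds are 0). On a blocking verdict it also turns *diff into the time until the deciding bucket recovers. Restated in user-space form:

#include <stdint.h>
typedef int64_t s64;

enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

struct klass { s64 tokens, ctokens, buffer, cbuffer; enum cmode cmode; };

static s64 lowater(const struct klass *c)
{
	return c->cmode != CANT_SEND ? -c->cbuffer : 0;
}

static s64 hiwater(const struct klass *c)
{
	return c->cmode == CAN_SEND ? -c->buffer : 0;
}

static enum cmode class_mode(const struct klass *c, s64 *diff)
{
	s64 toks;

	if ((toks = c->ctokens + *diff) < lowater(c)) {
		*diff = -toks;	/* ns until the ceil bucket recovers */
		return CANT_SEND;	/* over ceil: cannot even borrow */
	}
	if ((toks = c->tokens + *diff) >= hiwater(c))
		return CAN_SEND;	/* within rate: sends on its own */

	*diff = -toks;		/* ns until the rate bucket recovers */
	return MAY_BORROW;	/* over rate, under ceil */
}

htb_change_class_mode() below applies the verdict: overlimits is bumped when the class becomes blocked, and an active class is detached from its trees before the mode flips and re-attached after, so it always sits in the structures that match its mode.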
526 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
528 enum htb_cmode new_mode = htb_class_mode(cl, diff); in htb_change_class_mode()
530 if (new_mode == cl->cmode) in htb_change_class_mode()
534 cl->overlimits++; in htb_change_class_mode()
538 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
539 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
540 htb_deactivate_prios(q, cl); in htb_change_class_mode()
541 cl->cmode = new_mode; in htb_change_class_mode()
543 htb_activate_prios(q, cl); in htb_change_class_mode()
545 cl->cmode = new_mode; in htb_change_class_mode()
555 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
557 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
559 if (!cl->prio_activity) { in htb_activate()
560 cl->prio_activity = 1 << cl->prio; in htb_activate()
561 htb_activate_prios(q, cl); in htb_activate()
571 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
573 WARN_ON(!cl->prio_activity); in htb_deactivate()
575 htb_deactivate_prios(q, cl); in htb_deactivate()
576 cl->prio_activity = 0; in htb_deactivate()
585 struct htb_class *cl = htb_classify(skb, sch, &ret); in htb_enqueue() local
587 if (cl == HTB_DIRECT) { in htb_enqueue()
596 } else if (!cl) { in htb_enqueue()
602 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
606 cl->drops++; in htb_enqueue()
610 htb_activate(q, cl); in htb_enqueue()
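htb_enqueue() dispatches on the classification result: the HTB_DIRECT sentinel bypasses shaping entirely via the direct queue, a NULL result is a drop, and anything else is handed to the leaf's own child qdisc, after which the leaf is (re)activated. A skeletal model; the direct_qlen bound and the congestion-notification return codes are elided:

#include <stddef.h>

struct leafq { int qlen; };

#define DIRECT ((struct leafq *)-1L)	/* HTB_DIRECT-style sentinel */

static int leaf_enqueue(struct leafq *q)
{
	q->qlen++;	/* the child qdisc accepted the skb */
	return 0;
}

/* returns 0 on success, 1 on drop */
static int enqueue_model(struct leafq *cl, int *activate)
{
	*activate = 0;
	if (cl == DIRECT)
		return 0;	/* unshaped: direct queue, no classes */
	if (!cl)
		return 1;	/* classify failed, no default: drop */
	if (leaf_enqueue(cl) != 0)
		return 1;	/* child rejected; cl->drops++ above */

	*activate = 1;	/* htb_activate(): no-op if already active,
			 * per the !cl->prio_activity check above */
	return 0;
}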
618 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_tokens() argument
620 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
622 if (toks > cl->buffer) in htb_accnt_tokens()
623 toks = cl->buffer; in htb_accnt_tokens()
624 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
625 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
626 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
628 cl->tokens = toks; in htb_accnt_tokens()
631 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_ctokens() argument
633 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
635 if (toks > cl->cbuffer) in htb_accnt_ctokens()
636 toks = cl->cbuffer; in htb_accnt_ctokens()
637 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
638 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
639 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
641 cl->ctokens = toks; in htb_accnt_ctokens()
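htb_accnt_tokens() and htb_accnt_ctokens() are the same bucket update applied to different rates (cl->rate vs cl->ceil): credit the elapsed time, cap at the burst buffer, charge the packet's transmit time via psched_l2t_ns(), and floor near -mbuffer so a large debt cannot underflow. One sketch covers both, with a linear ns-per-byte stand-in for the kernel's rate table:

#include <stdint.h>
typedef int64_t s64;

struct bucket { s64 tokens, buffer, mbuffer, ns_per_byte; };

/* stand-in for psched_l2t_ns(): transmit time of `bytes` at the
 * bucket's rate, here a simple linear model */
static s64 l2t_ns(const struct bucket *b, int bytes)
{
	return (s64)bytes * b->ns_per_byte;
}

static void accnt(struct bucket *b, int bytes, s64 diff)
{
	s64 toks = diff + b->tokens;

	if (toks > b->buffer)
		toks = b->buffer;	/* cap credit at the burst size */
	toks -= l2t_ns(b, bytes);	/* charge this packet */
	if (toks <= -b->mbuffer)
		toks = 1 - b->mbuffer;	/* floor the debt */

	b->tokens = toks;
}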
655 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
662 while (cl) { in htb_charge_class()
663 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
664 if (cl->level >= level) { in htb_charge_class()
665 if (cl->level == level) in htb_charge_class()
666 cl->xstats.lends++; in htb_charge_class()
667 htb_accnt_tokens(cl, bytes, diff); in htb_charge_class()
669 cl->xstats.borrows++; in htb_charge_class()
670 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
672 htb_accnt_ctokens(cl, bytes, diff); in htb_charge_class()
673 cl->t_c = q->now; in htb_charge_class()
675 old_mode = cl->cmode; in htb_charge_class()
677 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
678 if (old_mode != cl->cmode) { in htb_charge_class()
680 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
681 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
682 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
686 if (cl->level) in htb_charge_class()
687 bstats_update(&cl->bstats, skb); in htb_charge_class()
689 cl = cl->parent; in htb_charge_class()
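htb_charge_class() walks from the leaf to the root after every dequeue. Here `level` is the row the packet was actually sent from: ancestors at or above it pay from their rate bucket (the one at exactly `level` is the lender, hence lends++), ancestors below it were borrowed through and only get their clock advanced (borrows++), and every class pays ceil tokens. A sketch of the walk, with deliberately simplified bucket updates:

#include <stdint.h>
typedef int64_t s64;

struct klass {
	struct klass *parent;
	int level;
	s64 tokens, ctokens, t_c, mbuffer;
};

/* simplified bucket updates: 1 ns per byte, clamping elided
 * (see the accounting sketch above for the full form) */
static void accnt_tokens(struct klass *c, int bytes, s64 diff)
{
	c->tokens += diff - (s64)bytes;
}

static void accnt_ctokens(struct klass *c, int bytes, s64 diff)
{
	c->ctokens += diff - (s64)bytes;
}

static void charge_class(struct klass *cl, int level, int bytes, s64 now)
{
	for (; cl; cl = cl->parent) {
		s64 diff = now - cl->t_c;

		if (diff > cl->mbuffer)
			diff = cl->mbuffer;	/* the min_t() above */
		if (cl->level >= level)
			accnt_tokens(cl, bytes, diff);	/* lender pays */
		else
			cl->tokens += diff;	/* borrowed: refill only */
		accnt_ctokens(cl, bytes, diff);	/* ceil always pays */
		cl->t_c = now;
		/* the kernel then re-evaluates the mode and, on a
		 * change, moves the class to/from the wait tree */
	}
}

Note that the bstats_update() at line 687 fires only for inner classes; leaves are counted at line 872 in htb_dequeue_tree(), so bytes are not double-counted.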
711 struct htb_class *cl; in htb_do_events() local
718 cl = rb_entry(p, struct htb_class, pq_node); in htb_do_events()
719 if (cl->pq_key > q->now) in htb_do_events()
720 return cl->pq_key; in htb_do_events()
723 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
724 htb_change_class_mode(q, cl, &diff); in htb_do_events()
725 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
726 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
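htb_do_events() drains the level's wait queue of everything that is due and reports the key of the first event still in the future, which becomes the watchdog deadline. The pattern, with a toy sorted array standing in for the wait rb-tree:

#include <stdint.h>
typedef int64_t s64;

static s64 waitq[64];	/* toy stand-in for the wait rb-tree, sorted */
static int nwait;

static int peek_earliest(s64 *key)
{
	if (!nwait)
		return 0;
	*key = waitq[0];
	return 1;
}

static void pop_earliest(void)
{
	for (int i = 1; i < nwait; i++)
		waitq[i - 1] = waitq[i];
	nwait--;
}

static s64 do_events_model(s64 now)
{
	s64 key;

	while (peek_earliest(&key)) {
		if (key > now)
			return key;	/* arm the watchdog for this time */
		pop_earliest();
		/* the kernel recomputes the class mode here and, if the
		 * class still cannot send, re-arms it with a fresh key */
	}
	return 0;	/* queue drained; nothing pending on this level */
}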
746 struct htb_class *cl = in htb_id_find_next_upper() local
749 if (id > cl->common.classid) { in htb_id_find_next_upper()
751 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
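htb_id_find_next_upper() is a successor search: find the class with the smallest classid >= id, remembering the best candidate seen each time the walk steps left. Restated over a plain binary tree:

#include <stddef.h>

struct node { unsigned int classid; struct node *left, *right; };

static struct node *find_next_upper(struct node *n, unsigned int id)
{
	struct node *best = NULL;

	while (n) {
		if (id > n->classid) {
			n = n->right;	/* everything here is too small */
		} else if (id < n->classid) {
			best = n;	/* candidate; try smaller ones */
			n = n->left;
		} else {
			return n;	/* exact match */
		}
	}
	return best;	/* NULL if no classid >= id exists */
}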
804 struct htb_class *cl; in htb_lookup_leaf() local
807 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
808 if (!cl->level) in htb_lookup_leaf()
809 return cl; in htb_lookup_leaf()
810 clp = &cl->inner.clprio[prio]; in htb_lookup_leaf()
827 struct htb_class *cl, *start; in htb_dequeue_tree() local
832 start = cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
836 if (unlikely(!cl)) in htb_dequeue_tree()
844 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
846 htb_deactivate(q, cl); in htb_dequeue_tree()
854 if (cl == start) /* fix start if we just deleted it */ in htb_dequeue_tree()
856 cl = next; in htb_dequeue_tree()
860 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
864 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
865 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: in htb_dequeue_tree()
867 cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
869 } while (cl != start); in htb_dequeue_tree()
872 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
873 cl->leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
874 if (cl->leaf.deficit[level] < 0) { in htb_dequeue_tree()
875 cl->leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
876 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr : in htb_dequeue_tree()
882 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
883 htb_deactivate(q, cl); in htb_dequeue_tree()
884 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
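After a successful dequeue, the leaf is charged one deficit-round-robin step at the level it sent from: its deficit shrinks by the packet length, and once it goes negative the leaf receives one quantum back and the round-robin pointer advances to the next class. As a sketch:

struct drr_leaf { int deficit, quantum; };

/* returns 1 when the round-robin pointer should advance
 * (htb_next_rb_node() in the kernel) */
static int drr_charge(struct drr_leaf *l, int pkt_len)
{
	l->deficit -= pkt_len;
	if (l->deficit < 0) {
		l->deficit += l->quantum;
		return 1;
	}
	return 0;
}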
952 struct htb_class *cl; in htb_reset() local
956 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
957 if (cl->level) in htb_reset()
958 memset(&cl->inner, 0, sizeof(cl->inner)); in htb_reset()
960 if (cl->leaf.q) in htb_reset()
961 qdisc_reset(cl->leaf.q); in htb_reset()
963 cl->prio_activity = 0; in htb_reset()
964 cl->cmode = HTB_CAN_SEND; in htb_reset()
1077 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class() local
1084 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1085 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1086 if (!cl->level && cl->leaf.q) in htb_dump_class()
1087 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1095 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1096 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1097 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1098 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1099 opt.quantum = cl->quantum; in htb_dump_class()
1100 opt.prio = cl->prio; in htb_dump_class()
1101 opt.level = cl->level; in htb_dump_class()
1104 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1105 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, in htb_dump_class()
1108 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1109 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, in htb_dump_class()
1123 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class_stats() local
1125 .drops = cl->drops, in htb_dump_class_stats()
1126 .overlimits = cl->overlimits, in htb_dump_class_stats()
1130 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1131 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1133 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), in htb_dump_class_stats()
1135 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), in htb_dump_class_stats()
1139 d, NULL, &cl->bstats) < 0 || in htb_dump_class_stats()
1140 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in htb_dump_class_stats()
1144 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1150 struct htb_class *cl = (struct htb_class *)arg; in htb_graft() local
1152 if (cl->level) in htb_graft()
1156 cl->common.classid, extack)) == NULL) in htb_graft()
1159 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1165 struct htb_class *cl = (struct htb_class *)arg; in htb_leaf() local
1166 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1171 struct htb_class *cl = (struct htb_class *)arg; in htb_qlen_notify() local
1173 htb_deactivate(qdisc_priv(sch), cl); in htb_qlen_notify()
1176 static inline int htb_parent_last_child(struct htb_class *cl) in htb_parent_last_child() argument
1178 if (!cl->parent) in htb_parent_last_child()
1181 if (cl->parent->children > 1) in htb_parent_last_child()
1187 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, in htb_parent_to_leaf() argument
1190 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1192 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1207 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) in htb_destroy_class() argument
1209 if (!cl->level) { in htb_destroy_class()
1210 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1211 qdisc_put(cl->leaf.q); in htb_destroy_class()
1213 gen_kill_estimator(&cl->rate_est); in htb_destroy_class()
1214 tcf_block_put(cl->block); in htb_destroy_class()
1215 kfree(cl); in htb_destroy_class()
1222 struct htb_class *cl; in htb_destroy() local
1235 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1236 tcf_block_put(cl->block); in htb_destroy()
1237 cl->block = NULL; in htb_destroy()
1241 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1243 htb_destroy_class(sch, cl); in htb_destroy()
1252 struct htb_class *cl = (struct htb_class *)arg; in htb_delete() local
1260 if (cl->children || cl->filter_cnt) in htb_delete()
1263 if (!cl->level && htb_parent_last_child(cl)) { in htb_delete()
1265 cl->parent->common.classid, in htb_delete()
1272 if (!cl->level) in htb_delete()
1273 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1276 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1277 if (cl->parent) in htb_delete()
1278 cl->parent->children--; in htb_delete()
1280 if (cl->prio_activity) in htb_delete()
1281 htb_deactivate(q, cl); in htb_delete()
1283 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1284 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1285 &q->hlevel[cl->level].wait_pq); in htb_delete()
1288 htb_parent_to_leaf(q, cl, new_q); in htb_delete()
1292 htb_destroy_class(sch, cl); in htb_delete()
1302 struct htb_class *cl = (struct htb_class *)*arg, *parent; in htb_change_class() local
1338 if (!cl) { /* new class */ in htb_change_class()
1367 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in htb_change_class()
1368 if (!cl) in htb_change_class()
1371 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in htb_change_class()
1373 kfree(cl); in htb_change_class()
1377 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1378 &cl->rate_est, in htb_change_class()
1383 tcf_block_put(cl->block); in htb_change_class()
1384 kfree(cl); in htb_change_class()
1389 cl->children = 0; in htb_change_class()
1390 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1393 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1419 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1421 cl->common.classid = classid; in htb_change_class()
1422 cl->parent = parent; in htb_change_class()
1425 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1426 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1427 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1428 cl->t_c = ktime_get_ns(); in htb_change_class()
1429 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1432 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1435 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1436 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
1439 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
1440 &cl->rate_est, in htb_change_class()
1454 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
1455 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
1460 if (!cl->level) { in htb_change_class()
1461 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
1464 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
1466 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
1468 cl->quantum = 1000; in htb_change_class()
1470 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
1472 cl->quantum = 200000; in htb_change_class()
1475 cl->quantum = hopt->quantum; in htb_change_class()
1476 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
1477 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
1480 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1481 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1488 cl->common.classid, (warn == -1 ? "small" : "big")); in htb_change_class()
1492 *arg = (unsigned long)cl; in htb_change_class()
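The quantum fragments in htb_change_class() derive the DRR quantum from the class rate when the user leaves it unset; the division by the qdisc-wide r2q factor (q->rate2quantum) happens between lines 1461 and 1464 and is not matched in this listing because that line does not reference cl. The result is clamped to INT_MAX and, with warnings, to [1000, 200000]; prio is likewise clamped to TC_HTB_NUMPRIO - 1. As a sketch:

#include <stdint.h>
#include <limits.h>

static int pick_quantum(uint64_t rate_bytes_ps, uint64_t r2q,
			int user_quantum)
{
	uint64_t q;

	if (user_quantum)
		return user_quantum;	/* explicit value always wins */

	q = rate_bytes_ps / r2q;	/* do_div() in the kernel */
	if (q > INT_MAX)
		q = INT_MAX;	/* min_t(u64, quantum, INT_MAX) above */
	if (q < 1000)
		q = 1000;	/* kernel warns: quantum is small */
	if (q > 200000)
		q = 200000;	/* kernel warns: quantum is big */
	return (int)q;
}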
1503 struct htb_class *cl = (struct htb_class *)arg; in htb_tcf_block() local
1505 return cl ? cl->block : q->block; in htb_tcf_block()
1511 struct htb_class *cl = htb_find(classid, sch); in htb_bind_filter() local
1522 if (cl) in htb_bind_filter()
1523 cl->filter_cnt++; in htb_bind_filter()
1524 return (unsigned long)cl; in htb_bind_filter()
1529 struct htb_class *cl = (struct htb_class *)arg; in htb_unbind_filter() local
1531 if (cl) in htb_unbind_filter()
1532 cl->filter_cnt--; in htb_unbind_filter()
1538 struct htb_class *cl; in htb_walk() local
1545 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()
1550 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in htb_walk()