Lines matching references to 'cl' in the CBQ packet scheduler (net/sched/sch_cbq.c)
163 #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) argument
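
The L2T macro above turns a packet length into its transmission time by indexing the class's precomputed rate table via qdisc_l2t(). A minimal sketch of the underlying arithmetic, assuming a plain rate in bytes per second instead of the kernel's table lookup (the function name is illustrative):

    /* Sketch: length-to-time conversion behind L2T(). The kernel indexes
     * a precomputed qdisc_rate_table rather than dividing per packet. */
    static inline unsigned long long len_to_time_usec(unsigned int len,
                                                      unsigned long rate_bytes_per_sec)
    {
            return ((unsigned long long)len * 1000000ULL) / rate_bytes_per_sec;
    }
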
181 struct cbq_class *cl; in cbq_reclassify() local
183 for (cl = this->tparent; cl; cl = cl->tparent) { in cbq_reclassify()
184 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; in cbq_reclassify()
210 struct cbq_class *cl = NULL; in cbq_classify() local
219 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
220 return cl; in cbq_classify()
237 cl = (void *)res.class; in cbq_classify()
238 if (!cl) { in cbq_classify()
240 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
241 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) in cbq_classify()
242 cl = defmap[TC_PRIO_BESTEFFORT]; in cbq_classify()
244 if (cl == NULL) in cbq_classify()
247 if (cl->level >= head->level) in cbq_classify()
257 return cbq_reclassify(skb, cl); in cbq_classify()
260 if (cl->level == 0) in cbq_classify()
261 return cl; in cbq_classify()
268 head = cl; in cbq_classify()
272 cl = head; in cbq_classify()
278 !(cl = head->defaults[prio & TC_PRIO_MAX]) && in cbq_classify()
279 !(cl = head->defaults[TC_PRIO_BESTEFFORT])) in cbq_classify()
282 return cl; in cbq_classify()
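
When the filter chain yields no leaf class, cbq_classify() falls back to the split class's default map: first the slot for the packet's priority band, then the best-effort slot, dropping the packet if both are empty. A hedged sketch of that fallback (pick_default is an illustrative name; the TC_PRIO_* constants are from linux/pkt_sched.h):

    /* Sketch of the defaults[] fallback used in cbq_classify(). */
    static struct cbq_class *pick_default(struct cbq_class *head, u32 prio)
    {
            struct cbq_class *cl = head->defaults[prio & TC_PRIO_MAX];

            if (!cl)
                    cl = head->defaults[TC_PRIO_BESTEFFORT];
            return cl;      /* may still be NULL: caller drops the packet */
    }
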
291 static inline void cbq_activate_class(struct cbq_class *cl) in cbq_activate_class() argument
293 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class()
294 int prio = cl->cpriority; in cbq_activate_class()
298 q->active[prio] = cl; in cbq_activate_class()
301 cl->next_alive = cl_tail->next_alive; in cbq_activate_class()
302 cl_tail->next_alive = cl; in cbq_activate_class()
304 cl->next_alive = cl; in cbq_activate_class()
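
Here q->active[prio] points at the tail of a circular singly linked list threaded through next_alive, and activation links the class in as the new tail. A self-contained sketch of that insertion (ring_insert_tail is an illustrative name):

    /* Sketch: insert cl as the new tail of the circular next_alive ring;
     * *tailp is NULL for an empty ring, as in cbq_activate_class(). */
    static void ring_insert_tail(struct cbq_class **tailp, struct cbq_class *cl)
    {
            struct cbq_class *tail = *tailp;

            if (tail) {
                    cl->next_alive = tail->next_alive;      /* old head */
                    tail->next_alive = cl;
            } else {
                    cl->next_alive = cl;                    /* one-node ring */
            }
            *tailp = cl;
    }
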
319 struct cbq_class *cl; in cbq_deactivate_class() local
323 cl = cl_prev->next_alive; in cbq_deactivate_class()
324 if (cl == this) { in cbq_deactivate_class()
325 cl_prev->next_alive = cl->next_alive; in cbq_deactivate_class()
326 cl->next_alive = NULL; in cbq_deactivate_class()
328 if (cl == q->active[prio]) { in cbq_deactivate_class()
330 if (cl == q->active[prio]) { in cbq_deactivate_class()
338 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
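
Deactivation is the matching unlink: walk the ring from the tail, splice the class out, and repair the tail pointer when the removed class was the tail, dropping it to NULL for a one-node ring. A sketch under the same simplified structure:

    /* Sketch of the unlink performed by cbq_deactivate_class(). */
    static void ring_remove(struct cbq_class **tailp, struct cbq_class *this)
    {
            struct cbq_class *prev = *tailp;

            do {
                    struct cbq_class *cl = prev->next_alive;

                    if (cl == this) {
                            prev->next_alive = cl->next_alive;
                            cl->next_alive = NULL;
                            if (cl == *tailp)       /* removed the tail */
                                    *tailp = (prev == cl) ? NULL : prev;
                            return;
                    }
                    prev = cl;
            } while (prev != *tailp);
    }
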
342 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
346 if (toplevel > cl->level) { in cbq_mark_toplevel()
350 if (cl->undertime < now) { in cbq_mark_toplevel()
351 q->toplevel = cl->level; in cbq_mark_toplevel()
354 } while ((cl = cl->borrow) != NULL && toplevel > cl->level); in cbq_mark_toplevel()
364 struct cbq_class *cl = cbq_classify(skb, sch, &ret); in cbq_enqueue() local
367 q->rx_class = cl; in cbq_enqueue()
369 if (cl == NULL) { in cbq_enqueue()
376 ret = qdisc_enqueue(skb, cl->q, to_free); in cbq_enqueue()
379 cbq_mark_toplevel(q, cl); in cbq_enqueue()
380 if (!cl->next_alive) in cbq_enqueue()
381 cbq_activate_class(cl); in cbq_enqueue()
387 cbq_mark_toplevel(q, cl); in cbq_enqueue()
388 cl->qstats.drops++; in cbq_enqueue()
394 static void cbq_overlimit(struct cbq_class *cl) in cbq_overlimit() argument
396 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_overlimit()
397 psched_tdiff_t delay = cl->undertime - q->now; in cbq_overlimit()
399 if (!cl->delayed) { in cbq_overlimit()
400 delay += cl->offtime; in cbq_overlimit()
409 if (cl->avgidle < 0) in cbq_overlimit()
410 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_overlimit()
411 if (cl->avgidle < cl->minidle) in cbq_overlimit()
412 cl->avgidle = cl->minidle; in cbq_overlimit()
415 cl->undertime = q->now + delay; in cbq_overlimit()
417 cl->xstats.overactions++; in cbq_overlimit()
418 cl->delayed = 1; in cbq_overlimit()
431 for (b = cl->borrow; b; b = b->borrow) { in cbq_overlimit()
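
The delay computed in cbq_overlimit() starts from how far undertime lies in the future; on a first offence it also adds the configured offtime and hands back the EWMA-discounted portion of a negative avgidle. A sketch with the psched types flattened to long:

    /* Sketch of the delay arithmetic in cbq_overlimit(). */
    static long overlimit_delay(const struct cbq_class *cl, long now)
    {
            long delay = cl->undertime - now;

            if (!cl->delayed) {
                    delay += cl->offtime;
                    if (cl->avgidle < 0)
                            delay -= (-cl->avgidle) -
                                     ((-cl->avgidle) >> cl->ewma_log);
            }
            return delay;   /* new undertime = now + delay */
    }
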
447 struct cbq_class *cl; in cbq_undelay_prio() local
455 cl = cl_prev->next_alive; in cbq_undelay_prio()
456 if (now - cl->penalized > 0) { in cbq_undelay_prio()
457 cl_prev->next_alive = cl->next_alive; in cbq_undelay_prio()
458 cl->next_alive = NULL; in cbq_undelay_prio()
459 cl->cpriority = cl->priority; in cbq_undelay_prio()
460 cl->delayed = 0; in cbq_undelay_prio()
461 cbq_activate_class(cl); in cbq_undelay_prio()
463 if (cl == q->active[prio]) { in cbq_undelay_prio()
465 if (cl == q->active[prio]) { in cbq_undelay_prio()
471 cl = cl_prev->next_alive; in cbq_undelay_prio()
472 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
473 sched = cl->penalized; in cbq_undelay_prio()
474 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
529 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
532 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
533 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
554 struct cbq_class *cl = this; in cbq_update() local
564 for ( ; cl; cl = cl->share) { in cbq_update()
565 long avgidle = cl->avgidle; in cbq_update()
568 cl->bstats.packets++; in cbq_update()
569 cl->bstats.bytes += len; in cbq_update()
578 idle = now - cl->last; in cbq_update()
580 avgidle = cl->maxidle; in cbq_update()
582 idle -= L2T(cl, len); in cbq_update()
589 avgidle += idle - (avgidle>>cl->ewma_log); in cbq_update()
595 if (avgidle < cl->minidle) in cbq_update()
596 avgidle = cl->minidle; in cbq_update()
598 cl->avgidle = avgidle; in cbq_update()
608 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); in cbq_update()
620 idle += L2T(cl, len); in cbq_update()
622 cl->undertime = now + idle; in cbq_update()
626 cl->undertime = PSCHED_PASTPERFECT; in cbq_update()
627 if (avgidle > cl->maxidle) in cbq_update()
628 cl->avgidle = cl->maxidle; in cbq_update()
630 cl->avgidle = avgidle; in cbq_update()
632 if ((s64)(now - cl->last) > 0) in cbq_update()
633 cl->last = now; in cbq_update()
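
The avgidle bookkeeping in cbq_update() is an exponentially weighted moving average kept in scaled form. With weight W = 2^-ewma_log and avgidle stored as true_avgidle / W, the update true_avgidle' = (1 - W) * true_avgidle + W * idle reduces to one shift and two adds, where idle is the interpacket gap minus the packet's allotted transmission time L2T(cl, len):

    /* Sketch of the scaled EWMA step from cbq_update(). */
    static long ewma_avgidle(long avgidle, long idle, unsigned int ewma_log)
    {
            return avgidle + idle - (avgidle >> ewma_log);
    }
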
640 cbq_under_limit(struct cbq_class *cl) in cbq_under_limit() argument
642 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit()
643 struct cbq_class *this_cl = cl; in cbq_under_limit()
645 if (cl->tparent == NULL) in cbq_under_limit()
646 return cl; in cbq_under_limit()
648 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
649 cl->delayed = 0; in cbq_under_limit()
650 return cl; in cbq_under_limit()
664 cl = cl->borrow; in cbq_under_limit()
665 if (!cl) { in cbq_under_limit()
670 if (cl->level > q->toplevel) in cbq_under_limit()
672 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
674 cl->delayed = 0; in cbq_under_limit()
675 return cl; in cbq_under_limit()
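
When a class is over limit, cbq_under_limit() climbs the borrow chain looking for an ancestor that may lend bandwidth, giving up when the chain ends or climbs above the top-level cap. A sketch with the same termination conditions (find_lender is an illustrative name):

    /* Sketch of the borrow walk in cbq_under_limit(). */
    static struct cbq_class *find_lender(struct cbq_class *cl,
                                         long now, int toplevel)
    {
            do {
                    cl = cl->borrow;
                    if (!cl || cl->level > toplevel)
                            return NULL;    /* nobody can lend */
            } while (cl->undertime != PSCHED_PASTPERFECT &&
                     now < cl->undertime);
            return cl;
    }
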
682 struct cbq_class *cl_tail, *cl_prev, *cl; in cbq_dequeue_prio() local
687 cl = cl_prev->next_alive; in cbq_dequeue_prio()
694 struct cbq_class *borrow = cl; in cbq_dequeue_prio()
696 if (cl->q->q.qlen && in cbq_dequeue_prio()
697 (borrow = cbq_under_limit(cl)) == NULL) in cbq_dequeue_prio()
700 if (cl->deficit <= 0) { in cbq_dequeue_prio()
705 cl->deficit += cl->quantum; in cbq_dequeue_prio()
709 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
718 cl->deficit -= qdisc_pkt_len(skb); in cbq_dequeue_prio()
719 q->tx_class = cl; in cbq_dequeue_prio()
721 if (borrow != cl) { in cbq_dequeue_prio()
724 cl->xstats.borrows++; in cbq_dequeue_prio()
727 cl->xstats.borrows += qdisc_pkt_len(skb); in cbq_dequeue_prio()
732 if (cl->deficit <= 0) { in cbq_dequeue_prio()
733 q->active[prio] = cl; in cbq_dequeue_prio()
734 cl = cl->next_alive; in cbq_dequeue_prio()
735 cl->deficit += cl->quantum; in cbq_dequeue_prio()
740 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
744 cl_prev->next_alive = cl->next_alive; in cbq_dequeue_prio()
745 cl->next_alive = NULL; in cbq_dequeue_prio()
748 if (cl == cl_tail) { in cbq_dequeue_prio()
753 if (cl == cl_tail) { in cbq_dequeue_prio()
757 if (cl->q->q.qlen) in cbq_dequeue_prio()
758 cbq_activate_class(cl); in cbq_dequeue_prio()
764 if (cl->q->q.qlen) in cbq_dequeue_prio()
765 cbq_activate_class(cl); in cbq_dequeue_prio()
767 cl = cl_prev; in cbq_dequeue_prio()
771 cl_prev = cl; in cbq_dequeue_prio()
772 cl = cl->next_alive; in cbq_dequeue_prio()
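
The deficit accounting above is weighted round-robin: a class may transmit while its deficit is positive, each dequeued packet is charged its length, and an exhausted class is topped up with its quantum before the round moves on. A self-contained sketch of that discipline:

    /* Sketch of the deficit round-robin used in cbq_dequeue_prio(). */
    struct drr_state {
            int deficit;
            int quantum;
    };

    static int drr_may_send(struct drr_state *s)
    {
            if (s->deficit <= 0) {
                    s->deficit += s->quantum;  /* refill for the next round */
                    return 0;                  /* skip to the next class */
            }
            return 1;
    }

    static void drr_charge(struct drr_state *s, int pkt_len)
    {
            s->deficit -= pkt_len;     /* may go negative; settles later */
    }
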
871 struct cbq_class *cl; in cbq_adjust_levels() local
873 cl = this->children; in cbq_adjust_levels()
874 if (cl) { in cbq_adjust_levels()
876 if (cl->level > level) in cbq_adjust_levels()
877 level = cl->level; in cbq_adjust_levels()
878 } while ((cl = cl->sibling) != this->children); in cbq_adjust_levels()
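
cbq_adjust_levels() recomputes a class's depth as one more than its deepest child (leaves sit at level 0), scanning the circular sibling ring shown above. A sketch of the per-class step; the real function repeats this up the tparent chain:

    /* Sketch: recompute one class's level from its children. */
    static void adjust_one_level(struct cbq_class *this)
    {
            unsigned int level = 0;
            struct cbq_class *cl = this->children;

            if (cl) {
                    do {
                            if (cl->level > level)
                                    level = cl->level;
                    } while ((cl = cl->sibling) != this->children);
            }
            this->level = level + 1;
    }
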
886 struct cbq_class *cl; in cbq_normalize_quanta() local
893 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
897 if (cl->priority == prio) { in cbq_normalize_quanta()
898 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
901 if (cl->quantum <= 0 || in cbq_normalize_quanta()
902 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) { in cbq_normalize_quanta()
904 cl->common.classid, cl->quantum); in cbq_normalize_quanta()
905 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; in cbq_normalize_quanta()
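
The normalization keeps each quantum proportional to the class's weight while the mean quantum at a priority equals the allot. A worked example under assumed numbers: three classes at one priority with weights 1, 2 and 3 and allot 1514 give

    quanta[prio]   = 1 + 2 + 3 = 6
    nclasses[prio] = 3
    quantum(w)     = w * 1514 * 3 / 6   ->   757, 1514, 2271

and any result outside (0, 32 * MTU] is clamped to mtu/2 + 1 with a warning, as in the listing.
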
911 static void cbq_sync_defmap(struct cbq_class *cl) in cbq_sync_defmap() argument
913 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap()
914 struct cbq_class *split = cl->split; in cbq_sync_defmap()
922 if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) in cbq_sync_defmap()
947 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask) in cbq_change_defmap() argument
952 split = cl->split; in cbq_change_defmap()
959 for (split = cl->tparent; split; split = split->tparent) in cbq_change_defmap()
967 if (cl->split != split) { in cbq_change_defmap()
968 cl->defmap = 0; in cbq_change_defmap()
969 cbq_sync_defmap(cl); in cbq_change_defmap()
970 cl->split = split; in cbq_change_defmap()
971 cl->defmap = def & mask; in cbq_change_defmap()
973 cl->defmap = (cl->defmap & ~mask) | (def & mask); in cbq_change_defmap()
975 cbq_sync_defmap(cl); in cbq_change_defmap()
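
The defmap update is a masked merge: only the bits selected by mask are taken from def, while the rest keep their previous value. A sketch with a worked case:

    /* Sketch of the masked merge in cbq_change_defmap():
     * defmap=0x3, def=0x5, mask=0x6 -> (0x3 & ~0x6) | (0x5 & 0x6) = 0x5 */
    static u32 merge_defmap(u32 defmap, u32 def, u32 mask)
    {
            return (defmap & ~mask) | (def & mask);
    }
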
980 struct cbq_class *cl, **clp; in cbq_unlink_class() local
987 cl = *clp; in cbq_unlink_class()
989 if (cl == this) { in cbq_unlink_class()
990 *clp = cl->sibling; in cbq_unlink_class()
993 clp = &cl->sibling; in cbq_unlink_class()
994 } while ((cl = *clp) != this->sibling); in cbq_unlink_class()
1029 struct cbq_class *cl; in cbq_reset() local
1046 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1047 qdisc_reset(cl->q); in cbq_reset()
1049 cl->next_alive = NULL; in cbq_reset()
1050 cl->undertime = PSCHED_PASTPERFECT; in cbq_reset()
1051 cl->avgidle = cl->maxidle; in cbq_reset()
1052 cl->deficit = cl->quantum; in cbq_reset()
1053 cl->cpriority = cl->priority; in cbq_reset()
1060 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) in cbq_set_lss() argument
1063 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; in cbq_set_lss()
1064 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; in cbq_set_lss()
1067 cl->ewma_log = lss->ewma_log; in cbq_set_lss()
1069 cl->avpkt = lss->avpkt; in cbq_set_lss()
1071 cl->minidle = -(long)lss->minidle; in cbq_set_lss()
1073 cl->maxidle = lss->maxidle; in cbq_set_lss()
1074 cl->avgidle = lss->maxidle; in cbq_set_lss()
1077 cl->offtime = lss->offtime; in cbq_set_lss()
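
The flag handling in cbq_set_lss() decides whom a class shares with and borrows from: TCF_CBQ_LSS_ISOLATED cuts the share link and TCF_CBQ_LSS_BOUNDED cuts the borrow link; both otherwise point at the parent. A sketch of just that mapping (apply_lss_flags is an illustrative name):

    /* Sketch: isolated classes lend to nobody, bounded classes borrow
     * from nobody; flags as in linux/pkt_sched.h. */
    static void apply_lss_flags(struct cbq_class *cl, unsigned int flags)
    {
            cl->share  = (flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
            cl->borrow = (flags & TCF_CBQ_LSS_BOUNDED)  ? NULL : cl->tparent;
    }
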
1081 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1083 q->nclasses[cl->priority]--; in cbq_rmprio()
1084 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1085 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1088 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1090 q->nclasses[cl->priority]++; in cbq_addprio()
1091 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1092 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1095 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) in cbq_set_wrr() argument
1097 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr()
1100 cl->allot = wrr->allot; in cbq_set_wrr()
1102 cl->weight = wrr->weight; in cbq_set_wrr()
1104 cl->priority = wrr->priority - 1; in cbq_set_wrr()
1105 cl->cpriority = cl->priority; in cbq_set_wrr()
1106 if (cl->priority >= cl->priority2) in cbq_set_wrr()
1107 cl->priority2 = TC_CBQ_MAXPRIO - 1; in cbq_set_wrr()
1110 cbq_addprio(q, cl); in cbq_set_wrr()
1114 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) in cbq_set_fopt() argument
1116 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); in cbq_set_fopt()
1232 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_rate() argument
1236 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) in cbq_dump_rate()
1245 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_lss() argument
1251 if (cl->borrow == NULL) in cbq_dump_lss()
1253 if (cl->share == NULL) in cbq_dump_lss()
1255 opt.ewma_log = cl->ewma_log; in cbq_dump_lss()
1256 opt.level = cl->level; in cbq_dump_lss()
1257 opt.avpkt = cl->avpkt; in cbq_dump_lss()
1258 opt.maxidle = cl->maxidle; in cbq_dump_lss()
1259 opt.minidle = (u32)(-cl->minidle); in cbq_dump_lss()
1260 opt.offtime = cl->offtime; in cbq_dump_lss()
1271 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_wrr() argument
1278 opt.allot = cl->allot; in cbq_dump_wrr()
1279 opt.priority = cl->priority + 1; in cbq_dump_wrr()
1280 opt.cpriority = cl->cpriority + 1; in cbq_dump_wrr()
1281 opt.weight = cl->weight; in cbq_dump_wrr()
1291 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_fopt() argument
1296 if (cl->split || cl->defmap) { in cbq_dump_fopt()
1297 opt.split = cl->split ? cl->split->common.classid : 0; in cbq_dump_fopt()
1298 opt.defmap = cl->defmap; in cbq_dump_fopt()
1310 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_attr() argument
1312 if (cbq_dump_lss(skb, cl) < 0 || in cbq_dump_attr()
1313 cbq_dump_rate(skb, cl) < 0 || in cbq_dump_attr()
1314 cbq_dump_wrr(skb, cl) < 0 || in cbq_dump_attr()
1315 cbq_dump_fopt(skb, cl) < 0) in cbq_dump_attr()
1350 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class() local
1353 if (cl->tparent) in cbq_dump_class()
1354 tcm->tcm_parent = cl->tparent->common.classid; in cbq_dump_class()
1357 tcm->tcm_handle = cl->common.classid; in cbq_dump_class()
1358 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1363 if (cbq_dump_attr(skb, cl) < 0) in cbq_dump_class()
1377 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class_stats() local
1380 cl->xstats.avgidle = cl->avgidle; in cbq_dump_class_stats()
1381 cl->xstats.undertime = 0; in cbq_dump_class_stats()
1382 qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog); in cbq_dump_class_stats()
1384 if (cl->undertime != PSCHED_PASTPERFECT) in cbq_dump_class_stats()
1385 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1388 d, NULL, &cl->bstats) < 0 || in cbq_dump_class_stats()
1389 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in cbq_dump_class_stats()
1390 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) in cbq_dump_class_stats()
1393 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in cbq_dump_class_stats()
1399 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_graft() local
1403 cl->common.classid, extack); in cbq_graft()
1408 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1414 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_leaf() local
1416 return cl->q; in cbq_leaf()
1421 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_qlen_notify() local
1423 cbq_deactivate_class(cl); in cbq_qlen_notify()
1433 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) in cbq_destroy_class() argument
1437 WARN_ON(cl->filters); in cbq_destroy_class()
1439 tcf_block_put(cl->block); in cbq_destroy_class()
1440 qdisc_put(cl->q); in cbq_destroy_class()
1441 qdisc_put_rtab(cl->R_tab); in cbq_destroy_class()
1442 gen_kill_estimator(&cl->rate_est); in cbq_destroy_class()
1443 if (cl != &q->link) in cbq_destroy_class()
1444 kfree(cl); in cbq_destroy_class()
1451 struct cbq_class *cl; in cbq_destroy() local
1463 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_destroy()
1464 tcf_block_put(cl->block); in cbq_destroy()
1465 cl->block = NULL; in cbq_destroy()
1469 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1471 cbq_destroy_class(sch, cl); in cbq_destroy()
1482 struct cbq_class *cl = (struct cbq_class *)*arg; in cbq_change_class() local
1497 if (cl) { in cbq_change_class()
1500 if (cl->tparent && in cbq_change_class()
1501 cl->tparent->common.classid != parentid) { in cbq_change_class()
1505 if (!cl->tparent && parentid != TC_H_ROOT) { in cbq_change_class()
1519 err = gen_replace_estimator(&cl->bstats, NULL, in cbq_change_class()
1520 &cl->rate_est, in cbq_change_class()
1534 if (cl->next_alive != NULL) in cbq_change_class()
1535 cbq_deactivate_class(cl); in cbq_change_class()
1538 qdisc_put_rtab(cl->R_tab); in cbq_change_class()
1539 cl->R_tab = rtab; in cbq_change_class()
1543 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1546 cbq_rmprio(q, cl); in cbq_change_class()
1547 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1551 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1553 if (cl->q->q.qlen) in cbq_change_class()
1554 cbq_activate_class(cl); in cbq_change_class()
1610 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in cbq_change_class()
1611 if (cl == NULL) in cbq_change_class()
1614 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in cbq_change_class()
1616 kfree(cl); in cbq_change_class()
1621 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, in cbq_change_class()
1627 tcf_block_put(cl->block); in cbq_change_class()
1628 kfree(cl); in cbq_change_class()
1633 cl->R_tab = rtab; in cbq_change_class()
1635 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid, in cbq_change_class()
1637 if (!cl->q) in cbq_change_class()
1638 cl->q = &noop_qdisc; in cbq_change_class()
1640 qdisc_hash_add(cl->q, true); in cbq_change_class()
1642 cl->common.classid = classid; in cbq_change_class()
1643 cl->tparent = parent; in cbq_change_class()
1644 cl->qdisc = sch; in cbq_change_class()
1645 cl->allot = parent->allot; in cbq_change_class()
1646 cl->quantum = cl->allot; in cbq_change_class()
1647 cl->weight = cl->R_tab->rate.rate; in cbq_change_class()
1650 cbq_link_class(cl); in cbq_change_class()
1651 cl->borrow = cl->tparent; in cbq_change_class()
1652 if (cl->tparent != &q->link) in cbq_change_class()
1653 cl->share = cl->tparent; in cbq_change_class()
1655 cl->minidle = -0x7FFFFFFF; in cbq_change_class()
1656 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1657 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1658 if (cl->ewma_log == 0) in cbq_change_class()
1659 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1660 if (cl->maxidle == 0) in cbq_change_class()
1661 cl->maxidle = q->link.maxidle; in cbq_change_class()
1662 if (cl->avpkt == 0) in cbq_change_class()
1663 cl->avpkt = q->link.avpkt; in cbq_change_class()
1665 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1670 *arg = (unsigned long)cl; in cbq_change_class()
1681 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_delete() local
1683 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1688 qdisc_purge_queue(cl->q); in cbq_delete()
1690 if (cl->next_alive) in cbq_delete()
1691 cbq_deactivate_class(cl); in cbq_delete()
1693 if (q->tx_borrowed == cl) in cbq_delete()
1695 if (q->tx_class == cl) { in cbq_delete()
1700 if (q->rx_class == cl) in cbq_delete()
1704 cbq_unlink_class(cl); in cbq_delete()
1705 cbq_adjust_levels(cl->tparent); in cbq_delete()
1706 cl->defmap = 0; in cbq_delete()
1707 cbq_sync_defmap(cl); in cbq_delete()
1709 cbq_rmprio(q, cl); in cbq_delete()
1712 cbq_destroy_class(sch, cl); in cbq_delete()
1720 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_tcf_block() local
1722 if (cl == NULL) in cbq_tcf_block()
1723 cl = &q->link; in cbq_tcf_block()
1725 return cl->block; in cbq_tcf_block()
1733 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter() local
1735 if (cl) { in cbq_bind_filter()
1736 if (p && p->level <= cl->level) in cbq_bind_filter()
1738 cl->filters++; in cbq_bind_filter()
1739 return (unsigned long)cl; in cbq_bind_filter()
1746 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_unbind_filter() local
1748 cl->filters--; in cbq_unbind_filter()
1754 struct cbq_class *cl; in cbq_walk() local
1761 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
1766 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in cbq_walk()