
Lines matching refs:q — every occurrence of the identifier q in this file, indexed by source line number. The cbq_* functions and struct cbq_sched_data indicate this is the Linux CBQ packet scheduler (net/sched/sch_cbq.c, from a pre-4.x kernel).

115 	struct Qdisc		*q;		/* Elementary queueing discipline */  member
179 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
183 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
220 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
221 struct cbq_class *head = &q->link; in cbq_classify()
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
249 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
302 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
306 cl_tail = q->active[prio]; in cbq_activate_class()
307 q->active[prio] = cl; in cbq_activate_class()
314 q->activemask |= (1<<prio); in cbq_activate_class()
326 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_deactivate_class() local
329 struct cbq_class *cl_prev = q->active[prio]; in cbq_deactivate_class()
337 if (cl == q->active[prio]) { in cbq_deactivate_class()
338 q->active[prio] = cl_prev; in cbq_deactivate_class()
339 if (cl == q->active[prio]) { in cbq_deactivate_class()
340 q->active[prio] = NULL; in cbq_deactivate_class()
341 q->activemask &= ~(1<<prio); in cbq_deactivate_class()
347 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
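
The two functions above, cbq_activate_class() and cbq_deactivate_class(), maintain per priority a circular singly-linked ring of active classes whose tail is q->active[prio]; the double test against q->active[prio] on lines 337-341 handles first removing the tail, then the last remaining member. A minimal userspace sketch of the same ring, with illustrative types and names (cls, sched) rather than the kernel's:

    #include <stddef.h>

    struct cls {
        struct cls *next_alive;   /* next member of the circular ring */
    };

    struct sched {
        struct cls *active[8];    /* tail pointer per priority */
        unsigned int activemask;  /* bit set while a priority has members */
    };

    static void activate(struct sched *q, struct cls *cl, int prio)
    {
        struct cls *tail = q->active[prio];

        q->active[prio] = cl;              /* new class becomes the tail */
        if (tail) {                        /* splice in after the old tail */
            cl->next_alive = tail->next_alive;
            tail->next_alive = cl;
        } else {                           /* first member: ring of one */
            cl->next_alive = cl;
            q->activemask |= 1u << prio;
        }
    }

    /* precondition (as in the kernel): `this` is on the ring for `prio` */
    static void deactivate(struct sched *q, struct cls *this, int prio)
    {
        struct cls *cl, *prev = q->active[prio];

        do {
            cl = prev->next_alive;
            if (cl == this) {
                prev->next_alive = cl->next_alive;   /* unlink */
                if (cl == q->active[prio]) {         /* removed the tail */
                    q->active[prio] = prev;
                    if (cl == q->active[prio]) {     /* it was the last one */
                        q->active[prio] = NULL;
                        q->activemask &= ~(1u << prio);
                    }
                }
                return;
            }
        } while ((prev = cl) != q->active[prio]);
    }
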
351 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
353 int toplevel = q->toplevel; in cbq_mark_toplevel()
355 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { in cbq_mark_toplevel()
360 incr = now - q->now_rt; in cbq_mark_toplevel()
361 now = q->now + incr; in cbq_mark_toplevel()
365 q->toplevel = cl->level; in cbq_mark_toplevel()
375 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_enqueue() local
380 q->rx_class = cl; in cbq_enqueue()
390 cl->q->__parent = sch; in cbq_enqueue()
392 ret = qdisc_enqueue(skb, cl->q); in cbq_enqueue()
394 sch->q.qlen++; in cbq_enqueue()
395 cbq_mark_toplevel(q, cl); in cbq_enqueue()
403 cbq_mark_toplevel(q, cl); in cbq_enqueue()
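
Read together, the cbq_enqueue() fragments above classify the packet, remember the class in q->rx_class, hand the skb to the class's child qdisc, and on success bump the root's queue length and update the top level. A hedged reconstruction of the whole function, assuming a 3.x-era kernel (details such as the CONFIG_NET_CLS_ACT guard are omitted):

    static int cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
    {
        struct cbq_sched_data *q = qdisc_priv(sch);
        int ret;
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);

        q->rx_class = cl;                 /* remembered for cbq_reshape_fail() */
        if (cl == NULL) {
            if (ret & __NET_XMIT_BYPASS)
                sch->qstats.drops++;
            kfree_skb(skb);
            return ret;
        }

        cl->q->__parent = sch;            /* let the child reach the root */
        ret = qdisc_enqueue(skb, cl->q);  /* delegate to the class's own qdisc */
        if (ret == NET_XMIT_SUCCESS) {
            sch->q.qlen++;
            cbq_mark_toplevel(q, cl);     /* class is demonstrably active */
            if (!cl->next_alive)
                cbq_activate_class(cl);   /* join the WRR ring if it was idle */
            return ret;
        }

        if (net_xmit_drop_count(ret)) {
            sch->qstats.drops++;
            cbq_mark_toplevel(q, cl);
            cl->qstats.drops++;
        }
        return ret;
    }
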
415 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_classic() local
416 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_classic()
434 cl->undertime = q->now + delay; in cbq_ovl_classic()
439 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_ovl_classic()
440 q->wd_expires = delay; in cbq_ovl_classic()
446 if (q->toplevel == TC_CBQ_MAXLEVEL) { in cbq_ovl_classic()
448 psched_tdiff_t base_delay = q->wd_expires; in cbq_ovl_classic()
451 delay = b->undertime - q->now; in cbq_ovl_classic()
459 q->wd_expires = base_delay; in cbq_ovl_classic()
469 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_rclassic() local
473 if (cl->level > q->toplevel) { in cbq_ovl_rclassic()
488 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_delay() local
489 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_delay()
496 psched_time_t sched = q->now; in cbq_ovl_delay()
504 cl->undertime = q->now + delay; in cbq_ovl_delay()
510 q->pmask |= (1<<TC_CBQ_MAXPRIO); in cbq_ovl_delay()
513 if (hrtimer_try_to_cancel(&q->delay_timer) && in cbq_ovl_delay()
515 hrtimer_get_expires(&q->delay_timer), in cbq_ovl_delay()
517 hrtimer_set_expires(&q->delay_timer, expires); in cbq_ovl_delay()
518 hrtimer_restart(&q->delay_timer); in cbq_ovl_delay()
525 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_ovl_delay()
526 q->wd_expires = delay; in cbq_ovl_delay()
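
Lines 513-518 show the idiom for pulling an already-armed hrtimer earlier: try to cancel it, and only if its current expiry is later than the new deadline, move it, then restart either way. In context it reads roughly as below (hedged; the setup of the absolute deadline is abbreviated and the variable names around `expires` are an assumption):

    /* assuming `sched` holds q->now plus any accumulated penalty (line 496)
     * and `delay` is the class's remaining overlimit delay in psched ticks */
    ktime_t expires = ns_to_ktime(PSCHED_TICKS2NS(sched + delay));

    if (hrtimer_try_to_cancel(&q->delay_timer) &&
        ktime_to_ns(ktime_sub(hrtimer_get_expires(&q->delay_timer),
                              expires)) > 0)
        hrtimer_set_expires(&q->delay_timer, expires);  /* pull earlier only */
    hrtimer_restart(&q->delay_timer);                   /* re-arm either way */
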
533 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_lowprio() local
535 cl->penalized = q->now + cl->penalty; in cbq_ovl_lowprio()
539 q->pmask |= (1<<cl->cpriority); in cbq_ovl_lowprio()
549 if (cl->q->ops->drop) in cbq_ovl_drop()
550 if (cl->q->ops->drop(cl->q)) in cbq_ovl_drop()
551 cl->qdisc->q.qlen--; in cbq_ovl_drop()
556 static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio, in cbq_undelay_prio() argument
560 struct cbq_class *cl_prev = q->active[prio]; in cbq_undelay_prio()
575 if (cl == q->active[prio]) { in cbq_undelay_prio()
576 q->active[prio] = cl_prev; in cbq_undelay_prio()
577 if (cl == q->active[prio]) { in cbq_undelay_prio()
578 q->active[prio] = NULL; in cbq_undelay_prio()
586 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
593 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, in cbq_undelay() local
595 struct Qdisc *sch = q->watchdog.qdisc; in cbq_undelay()
602 pmask = q->pmask; in cbq_undelay()
603 q->pmask = 0; in cbq_undelay()
611 tmp = cbq_undelay_prio(q, prio, now); in cbq_undelay()
613 q->pmask |= 1<<prio; in cbq_undelay()
624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); in cbq_undelay()
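
cbq_undelay() drains the pending-priority bitmask: it snapshots q->pmask, clears it, services each set bit via cbq_undelay_prio(), and re-sets any bit whose classes are still penalized while tracking the earliest future event for the timer. A standalone sketch of that scan (illustrative names; service_prio() is a stub standing in for cbq_undelay_prio()):

    #include <stdint.h>

    /* stand-in for cbq_undelay_prio(): returns 0 when the priority is fully
     * serviced, or a positive "ticks until next event" when work remains */
    static long service_prio(int prio, long now)
    {
        (void)prio; (void)now;
        return 0;
    }

    static long undelay(uint32_t *pmaskp, long now)
    {
        uint32_t pmask = *pmaskp;
        long next = 0;                        /* earliest future event, 0 = none */

        *pmaskp = 0;
        while (pmask) {
            int prio = __builtin_ctz(pmask);  /* lowest pending priority */
            long tmp;

            pmask &= pmask - 1;               /* clear that bit */
            tmp = service_prio(prio, now);
            if (tmp > 0) {
                *pmaskp |= 1u << prio;        /* still penalized: re-arm */
                if (next == 0 || next > tmp)
                    next = tmp;               /* keep the soonest wakeup */
            }
        }
        return next;                          /* caller reprograms delay_timer */
    }
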
636 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_reshape_fail() local
637 struct cbq_class *cl = q->rx_class; in cbq_reshape_fail()
639 q->rx_class = NULL; in cbq_reshape_fail()
644 cbq_mark_toplevel(q, cl); in cbq_reshape_fail()
646 q->rx_class = cl; in cbq_reshape_fail()
647 cl->q->__parent = sch; in cbq_reshape_fail()
649 ret = qdisc_enqueue(skb, cl->q); in cbq_reshape_fail()
651 sch->q.qlen++; in cbq_reshape_fail()
676 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
679 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
680 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
683 q->toplevel = borrowed->level; in cbq_update_toplevel()
692 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_update_toplevel()
698 cbq_update(struct cbq_sched_data *q) in cbq_update() argument
700 struct cbq_class *this = q->tx_class; in cbq_update()
702 int len = q->tx_len; in cbq_update()
704 q->tx_class = NULL; in cbq_update()
720 idle = q->now - cl->last; in cbq_update()
761 idle -= L2T(&q->link, len); in cbq_update()
764 cl->undertime = q->now + idle; in cbq_update()
774 cl->last = q->now; in cbq_update()
777 cbq_update_toplevel(q, this, q->tx_borrowed); in cbq_update()
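
cbq_update() charges the just-transmitted packet to each class on the borrow chain: idle is the gap since the class last sent (line 720) minus the time the packet should take at the class's configured rate, folded into avgidle by the classic CBQ estimator avgidle += idle - (avgidle >> ewma_log). A negative avgidle marks the class overlimit and yields the undertime stamp on line 764. A runnable toy of that estimator with made-up samples; note the fixed point is avgidle = idle << ewma_log, i.e. avgidle stores the running average pre-scaled by 2^ewma_log:

    #include <stdio.h>

    int main(void)
    {
        long avgidle = 0;
        int ewma_log = 5;                 /* weight 1/32, cf. TC_CBQ_DEF_EWMA */
        long samples[] = { 100, 80, -40, -40, 120 };  /* idle = gap - xmit time */

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            long idle = samples[i];

            avgidle += idle - (avgidle >> ewma_log);
            /* avgidle > 0: class under its rate; avgidle < 0: overlimit */
            printf("idle=%4ld -> avgidle=%ld\n", idle, avgidle);
        }
        return 0;
    }
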
783 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit() local
789 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
811 if (cl->level > q->toplevel) in cbq_under_limit()
813 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
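
The cbq_under_limit() fragments show the ancestor-only borrowing walk: a class past its undertime (line 789) may send directly; otherwise the code climbs the borrow chain until it finds an underlimit ancestor, gives up above q->toplevel (line 811), or runs off the top and invokes the class's overlimit action. A hedged reconstruction of the loop, as in same-era kernels (this_cl is the class that originally wanted to send):

    do {
        cl = cl->borrow;                      /* climb toward the root */
        if (!cl) {                            /* nobody left to borrow from */
            this_cl->qstats.overlimits++;
            this_cl->overlimit(this_cl);      /* e.g. cbq_ovl_classic() */
            return NULL;
        }
        if (cl->level > q->toplevel)          /* borrowing capped by toplevel */
            return NULL;
    } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
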
822 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_prio() local
827 cl_tail = cl_prev = q->active[prio]; in cbq_dequeue_prio()
837 if (cl->q->q.qlen && in cbq_dequeue_prio()
850 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
860 q->tx_class = cl; in cbq_dequeue_prio()
861 q->tx_borrowed = borrow; in cbq_dequeue_prio()
871 q->tx_len = qdisc_pkt_len(skb); in cbq_dequeue_prio()
874 q->active[prio] = cl; in cbq_dequeue_prio()
881 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
896 q->active[prio] = NULL; in cbq_dequeue_prio()
897 q->activemask &= ~(1<<prio); in cbq_dequeue_prio()
898 if (cl->q->q.qlen) in cbq_dequeue_prio()
903 q->active[prio] = cl_tail; in cbq_dequeue_prio()
905 if (cl->q->q.qlen) in cbq_dequeue_prio()
917 q->active[prio] = cl_prev; in cbq_dequeue_prio()
925 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_1() local
929 activemask = q->activemask & 0xFF; in cbq_dequeue_1()
944 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue() local
949 incr = now - q->now_rt; in cbq_dequeue()
951 if (q->tx_class) { in cbq_dequeue()
960 incr2 = L2T(&q->link, q->tx_len); in cbq_dequeue()
961 q->now += incr2; in cbq_dequeue()
962 cbq_update(q); in cbq_dequeue()
965 q->now += incr; in cbq_dequeue()
967 if (now > q->now) in cbq_dequeue()
968 q->now = now; in cbq_dequeue()
970 q->now_rt = now; in cbq_dequeue()
973 q->wd_expires = 0; in cbq_dequeue()
978 sch->q.qlen--; in cbq_dequeue()
1001 if (q->toplevel == TC_CBQ_MAXLEVEL && in cbq_dequeue()
1002 q->link.undertime == PSCHED_PASTPERFECT) in cbq_dequeue()
1005 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_dequeue()
1006 q->link.undertime = PSCHED_PASTPERFECT; in cbq_dequeue()
1013 if (sch->q.qlen) { in cbq_dequeue()
1015 if (q->wd_expires) in cbq_dequeue()
1016 qdisc_watchdog_schedule(&q->watchdog, in cbq_dequeue()
1017 now + q->wd_expires); in cbq_dequeue()
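
Note how the pieces cooperate: each overlimit path only lowers q->wd_expires toward its own delay (lines 439-440 and 525-526), cbq_dequeue() zeroes it first (line 973), and if packets remain but none could be sent, a single watchdog is armed for that earliest wakeup (lines 1015-1017). The bookkeeping reduces to a min-with-zero-sentinel, sketched standalone with illustrative names:

    typedef long tdiff_t;

    static void record_delay(tdiff_t *wd_expires, tdiff_t delay)
    {
        /* 0 means "no wakeup requested yet"; otherwise keep the soonest */
        if (*wd_expires == 0 || *wd_expires > delay)
            *wd_expires = delay;
    }
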
1044 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) in cbq_normalize_quanta() argument
1049 if (q->quanta[prio] == 0) in cbq_normalize_quanta()
1052 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_normalize_quanta()
1053 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
1058 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
1059 q->quanta[prio]; in cbq_normalize_quanta()
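
cbq_normalize_quanta() recomputes each class's WRR quantum whenever weights change at a priority: quantum = weight * allot * nclasses / sum_of_weights, so a class of exactly average weight receives exactly its allot. A runnable worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned weight[] = { 10, 30 };   /* two classes at one priority */
        unsigned allot[]  = { 1514, 1514 };
        unsigned nclasses = 2;
        unsigned quanta   = 10 + 30;      /* sum of weights at this prio */

        for (unsigned i = 0; i < nclasses; i++) {
            unsigned quantum = weight[i] * allot[i] * nclasses / quanta;

            /* class 0: 10*1514*2/40 = 757; class 1: 30*1514*2/40 = 2271 */
            printf("class %u: quantum=%u\n", i, quantum);
        }
        return 0;
    }
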
1072 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap() local
1091 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_sync_defmap()
1094 hlist_for_each_entry(c, &q->clhash.hash[h], in cbq_sync_defmap()
1140 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_unlink_class() local
1142 qdisc_class_hash_remove(&q->clhash, &this->common); in cbq_unlink_class()
1167 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_link_class() local
1171 qdisc_class_hash_insert(&q->clhash, &this->common); in cbq_link_class()
1186 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_drop() local
1192 cl_head = q->active[prio]; in cbq_drop()
1198 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) { in cbq_drop()
1199 sch->q.qlen--; in cbq_drop()
1200 if (!cl->q->q.qlen) in cbq_drop()
1212 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_reset() local
1217 q->activemask = 0; in cbq_reset()
1218 q->pmask = 0; in cbq_reset()
1219 q->tx_class = NULL; in cbq_reset()
1220 q->tx_borrowed = NULL; in cbq_reset()
1221 qdisc_watchdog_cancel(&q->watchdog); in cbq_reset()
1222 hrtimer_cancel(&q->delay_timer); in cbq_reset()
1223 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_reset()
1224 q->now = psched_get_time(); in cbq_reset()
1225 q->now_rt = q->now; in cbq_reset()
1228 q->active[prio] = NULL; in cbq_reset()
1230 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_reset()
1231 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1232 qdisc_reset(cl->q); in cbq_reset()
1241 sch->q.qlen = 0; in cbq_reset()
1266 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1268 q->nclasses[cl->priority]--; in cbq_rmprio()
1269 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1270 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1273 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1275 q->nclasses[cl->priority]++; in cbq_addprio()
1276 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1277 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1282 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr() local
1295 cbq_addprio(q, cl); in cbq_set_wrr()
1333 if (cl->q->handle) { in cbq_set_police()
1335 cl->q->reshape_fail = cbq_reshape_fail; in cbq_set_police()
1337 cl->q->reshape_fail = NULL; in cbq_set_police()
1361 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_init() local
1375 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) in cbq_init()
1378 err = qdisc_class_hash_init(&q->clhash); in cbq_init()
1382 q->link.refcnt = 1; in cbq_init()
1383 q->link.sibling = &q->link; in cbq_init()
1384 q->link.common.classid = sch->handle; in cbq_init()
1385 q->link.qdisc = sch; in cbq_init()
1386 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in cbq_init()
1388 if (!q->link.q) in cbq_init()
1389 q->link.q = &noop_qdisc; in cbq_init()
1391 q->link.priority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1392 q->link.priority2 = TC_CBQ_MAXPRIO - 1; in cbq_init()
1393 q->link.cpriority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1394 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; in cbq_init()
1395 q->link.overlimit = cbq_ovl_classic; in cbq_init()
1396 q->link.allot = psched_mtu(qdisc_dev(sch)); in cbq_init()
1397 q->link.quantum = q->link.allot; in cbq_init()
1398 q->link.weight = q->link.R_tab->rate.rate; in cbq_init()
1400 q->link.ewma_log = TC_CBQ_DEF_EWMA; in cbq_init()
1401 q->link.avpkt = q->link.allot/2; in cbq_init()
1402 q->link.minidle = -0x7FFFFFFF; in cbq_init()
1404 qdisc_watchdog_init(&q->watchdog, sch); in cbq_init()
1405 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in cbq_init()
1406 q->delay_timer.function = cbq_undelay; in cbq_init()
1407 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_init()
1408 q->now = psched_get_time(); in cbq_init()
1409 q->now_rt = q->now; in cbq_init()
1411 cbq_link_class(&q->link); in cbq_init()
1414 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_init()
1416 cbq_addprio(q, &q->link); in cbq_init()
1420 qdisc_put_rtab(q->link.R_tab); in cbq_init()
1556 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump() local
1562 if (cbq_dump_attr(skb, &q->link) < 0) in cbq_dump()
1575 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_stats() local
1577 q->link.xstats.avgidle = q->link.avgidle; in cbq_dump_stats()
1578 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats)); in cbq_dump_stats()
1593 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1612 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_class_stats() local
1615 cl->qstats.qlen = cl->q->q.qlen; in cbq_dump_class_stats()
1620 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1647 *old = cl->q; in cbq_graft()
1648 cl->q = new; in cbq_graft()
1649 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); in cbq_graft()
1660 return cl->q; in cbq_leaf()
1667 if (cl->q->q.qlen == 0) in cbq_qlen_notify()
1673 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_get() local
1674 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_get()
1685 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy_class() local
1690 qdisc_destroy(cl->q); in cbq_destroy_class()
1693 if (cl != &q->link) in cbq_destroy_class()
1699 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy() local
1705 q->rx_class = NULL; in cbq_destroy()
1712 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1713 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) in cbq_destroy()
1716 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1717 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1721 qdisc_class_hash_destroy(&q->clhash); in cbq_destroy()
1731 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_put() local
1734 if (q->rx_class == cl) in cbq_put()
1735 q->rx_class = NULL; in cbq_put()
1748 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_change_class() local
1805 cbq_rmprio(q, cl); in cbq_change_class()
1820 if (cl->q->q.qlen) in cbq_change_class()
1842 cbq_class_lookup(q, classid)) in cbq_change_class()
1849 if (++q->hgenerator >= 0x8000) in cbq_change_class()
1850 q->hgenerator = 1; in cbq_change_class()
1851 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) in cbq_change_class()
1857 classid = classid|q->hgenerator; in cbq_change_class()
1860 parent = &q->link; in cbq_change_class()
1862 parent = cbq_class_lookup(q, parentid); in cbq_change_class()
1886 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); in cbq_change_class()
1887 if (!cl->q) in cbq_change_class()
1888 cl->q = &noop_qdisc; in cbq_change_class()
1899 if (cl->tparent != &q->link) in cbq_change_class()
1906 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1908 cl->maxidle = q->link.maxidle; in cbq_change_class()
1910 cl->avpkt = q->link.avpkt; in cbq_change_class()
1922 qdisc_class_hash_grow(sch, &q->clhash); in cbq_change_class()
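
Lines 1849-1857 above implement automatic classid allocation in cbq_change_class(): a 15-bit generator is advanced with wraparound (skipping minor 0) until cbq_class_lookup() reports the minor free, bounded by one full cycle. Extracted as a standalone helper (lookup() is a hypothetical stand-in for cbq_class_lookup()):

    #include <stdint.h>

    extern void *lookup(uint32_t classid);   /* stand-in: non-NULL if taken */

    static int alloc_minor(uint32_t major, unsigned *hgenerator, uint32_t *out)
    {
        for (int i = 0; i < 0x8000; i++) {    /* at most one full cycle */
            if (++*hgenerator >= 0x8000)
                *hgenerator = 1;              /* wrap around, skip minor 0 */
            if (lookup(major | *hgenerator) == NULL) {
                *out = major | *hgenerator;
                return 0;
            }
        }
        return -1;                            /* id space exhausted */
    }
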
1934 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_delete() local
1938 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1943 qlen = cl->q->q.qlen; in cbq_delete()
1944 qdisc_reset(cl->q); in cbq_delete()
1945 qdisc_tree_decrease_qlen(cl->q, qlen); in cbq_delete()
1950 if (q->tx_borrowed == cl) in cbq_delete()
1951 q->tx_borrowed = q->tx_class; in cbq_delete()
1952 if (q->tx_class == cl) { in cbq_delete()
1953 q->tx_class = NULL; in cbq_delete()
1954 q->tx_borrowed = NULL; in cbq_delete()
1957 if (q->rx_class == cl) in cbq_delete()
1958 q->rx_class = NULL; in cbq_delete()
1966 cbq_rmprio(q, cl); in cbq_delete()
1980 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_find_tcf() local
1984 cl = &q->link; in cbq_find_tcf()
1992 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_bind_filter() local
1994 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter()
2014 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_walk() local
2021 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_walk()
2022 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
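
The listing stops at cbq_walk()'s hash iteration; the body visiting each class follows the standard qdisc walker contract (honor arg->skip, count every class, stop when the callback returns negative). A hedged sketch of that inner body, as found in same-era qdiscs:

    hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
        if (arg->count < arg->skip) {         /* not yet at the resume point */
            arg->count++;
            continue;
        }
        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
            arg->stop = 1;                    /* caller sees a partial walk */
            return;
        }
        arg->count++;
    }
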