Lines matching refs: q
115 struct Qdisc *q; /* Elementary queueing discipline */ member
178 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
182 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
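The lookup above resolves a 32-bit classid to its class through the generic class hash (q->clhash). A minimal userspace sketch of the same idea, with hypothetical types and a plain mask-based bucket index standing in for the kernel's qdisc_class_find() machinery:

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical stand-ins for the kernel's qdisc_class_hash types. */
	struct cbq_class_stub {
		uint32_t classid;
		struct cbq_class_stub *hnext;   /* next entry in the same bucket */
	};

	struct class_hash_stub {
		struct cbq_class_stub **buckets;
		unsigned int hashmask;          /* hashsize - 1, hashsize a power of two */
	};

	/* Resolve a classid to a class, or NULL if it is not linked. */
	static struct cbq_class_stub *
	class_lookup(struct class_hash_stub *h, uint32_t classid)
	{
		struct cbq_class_stub *cl;

		for (cl = h->buckets[classid & h->hashmask]; cl; cl = cl->hnext)
			if (cl->classid == classid)
				return cl;
		return NULL;
	}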
219 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
220 struct cbq_class *head = &q->link; in cbq_classify()
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
250 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
307 cl_tail = q->active[prio]; in cbq_activate_class()
308 q->active[prio] = cl; in cbq_activate_class()
315 q->activemask |= (1<<prio); in cbq_activate_class()
327 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_deactivate_class() local
330 struct cbq_class *cl_prev = q->active[prio]; in cbq_deactivate_class()
338 if (cl == q->active[prio]) { in cbq_deactivate_class()
339 q->active[prio] = cl_prev; in cbq_deactivate_class()
340 if (cl == q->active[prio]) { in cbq_deactivate_class()
341 q->active[prio] = NULL; in cbq_deactivate_class()
342 q->activemask &= ~(1<<prio); in cbq_deactivate_class()
348 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
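cbq_activate_class()/cbq_deactivate_class() keep one circular ring of backlogged classes per priority (q->active[prio], pointing at the ring's tail) plus a bitmap (q->activemask) recording which priorities have anything to send. A simplified, self-contained sketch of that bookkeeping; the struct layout and names are illustrative, not the kernel's exact ones:

	#include <stddef.h>

	#define NPRIO 8

	struct wrr_class {
		int prio;
		struct wrr_class *next_alive;     /* circular ring of active classes */
	};

	struct wrr_sched {
		struct wrr_class *active[NPRIO];  /* tail of each ring, or NULL */
		unsigned int activemask;          /* bit i set => active[i] != NULL */
	};

	/* Splice cl in after the current tail and mark its priority active. */
	static void activate_class(struct wrr_sched *q, struct wrr_class *cl)
	{
		struct wrr_class *tail = q->active[cl->prio];

		q->active[cl->prio] = cl;
		if (tail) {                       /* ring already exists: link in */
			cl->next_alive = tail->next_alive;
			tail->next_alive = cl;
		} else {                          /* first active class at this prio */
			cl->next_alive = cl;
			q->activemask |= 1u << cl->prio;
		}
	}

	/* Unlink cl; clear the priority bit when the ring becomes empty. */
	static void deactivate_class(struct wrr_sched *q, struct wrr_class *cl)
	{
		struct wrr_class *prev = q->active[cl->prio];

		do {
			if (prev->next_alive == cl) {
				prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;
				if (q->active[cl->prio] == cl) {
					q->active[cl->prio] = prev;
					if (q->active[cl->prio] == cl) {
						q->active[cl->prio] = NULL;
						q->activemask &= ~(1u << cl->prio);
					}
				}
				return;
			}
		} while ((prev = prev->next_alive) != q->active[cl->prio]);
	}

The double check on q->active[prio] in the removal path mirrors the listed code: it catches the case where the removed class was the ring's only member, so the tail pointer and the activemask bit must both be cleared.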
352 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
354 int toplevel = q->toplevel; in cbq_mark_toplevel()
356 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { in cbq_mark_toplevel()
361 q->toplevel = cl->level; in cbq_mark_toplevel()
371 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_enqueue() local
376 q->rx_class = cl; in cbq_enqueue()
386 cl->q->__parent = sch; in cbq_enqueue()
388 ret = qdisc_enqueue(skb, cl->q); in cbq_enqueue()
390 sch->q.qlen++; in cbq_enqueue()
391 cbq_mark_toplevel(q, cl); in cbq_enqueue()
399 cbq_mark_toplevel(q, cl); in cbq_enqueue()
411 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_classic() local
412 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_classic()
430 cl->undertime = q->now + delay; in cbq_ovl_classic()
435 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_ovl_classic()
436 q->wd_expires = delay; in cbq_ovl_classic()
442 if (q->toplevel == TC_CBQ_MAXLEVEL) { in cbq_ovl_classic()
444 psched_tdiff_t base_delay = q->wd_expires; in cbq_ovl_classic()
447 delay = b->undertime - q->now; in cbq_ovl_classic()
455 q->wd_expires = base_delay; in cbq_ovl_classic()
465 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_rclassic() local
469 if (cl->level > q->toplevel) { in cbq_ovl_rclassic()
484 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_delay() local
485 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_delay()
492 psched_time_t sched = q->now; in cbq_ovl_delay()
500 cl->undertime = q->now + delay; in cbq_ovl_delay()
506 q->pmask |= (1<<TC_CBQ_MAXPRIO); in cbq_ovl_delay()
509 if (hrtimer_try_to_cancel(&q->delay_timer) && in cbq_ovl_delay()
511 hrtimer_get_expires(&q->delay_timer), in cbq_ovl_delay()
513 hrtimer_set_expires(&q->delay_timer, expires); in cbq_ovl_delay()
514 hrtimer_restart(&q->delay_timer); in cbq_ovl_delay()
521 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_ovl_delay()
522 q->wd_expires = delay; in cbq_ovl_delay()
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_lowprio() local
531 cl->penalized = q->now + cl->penalty; in cbq_ovl_lowprio()
535 q->pmask |= (1<<cl->cpriority); in cbq_ovl_lowprio()
545 if (cl->q->ops->drop) in cbq_ovl_drop()
546 if (cl->q->ops->drop(cl->q)) in cbq_ovl_drop()
547 cl->qdisc->q.qlen--; in cbq_ovl_drop()
552 static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio, in cbq_undelay_prio() argument
556 struct cbq_class *cl_prev = q->active[prio]; in cbq_undelay_prio()
571 if (cl == q->active[prio]) { in cbq_undelay_prio()
572 q->active[prio] = cl_prev; in cbq_undelay_prio()
573 if (cl == q->active[prio]) { in cbq_undelay_prio()
574 q->active[prio] = NULL; in cbq_undelay_prio()
582 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
589 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, in cbq_undelay() local
591 struct Qdisc *sch = q->watchdog.qdisc; in cbq_undelay()
598 pmask = q->pmask; in cbq_undelay()
599 q->pmask = 0; in cbq_undelay()
607 tmp = cbq_undelay_prio(q, prio, now); in cbq_undelay()
609 q->pmask |= 1<<prio; in cbq_undelay()
620 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); in cbq_undelay()
632 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_reshape_fail() local
633 struct cbq_class *cl = q->rx_class; in cbq_reshape_fail()
635 q->rx_class = NULL; in cbq_reshape_fail()
640 cbq_mark_toplevel(q, cl); in cbq_reshape_fail()
642 q->rx_class = cl; in cbq_reshape_fail()
643 cl->q->__parent = sch; in cbq_reshape_fail()
645 ret = qdisc_enqueue(skb, cl->q); in cbq_reshape_fail()
647 sch->q.qlen++; in cbq_reshape_fail()
672 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
675 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
676 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
679 q->toplevel = borrowed->level; in cbq_update_toplevel()
688 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_update_toplevel()
694 cbq_update(struct cbq_sched_data *q) in cbq_update() argument
696 struct cbq_class *this = q->tx_class; in cbq_update()
698 int len = q->tx_len; in cbq_update()
701 q->tx_class = NULL; in cbq_update()
705 now = q->now + L2T(&q->link, len); in cbq_update()
762 idle -= L2T(&q->link, len); in cbq_update()
779 cbq_update_toplevel(q, this, q->tx_borrowed); in cbq_update()
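cbq_update() charges the transmitted packet against the class by converting its length into transmission time with L2T(), a lookup in the precomputed rate table attached through TCA_CBQ_RTAB. A rough userspace sketch of that length-to-time conversion; the table layout here is deliberately simplified (the real helper also accounts for cell alignment, overhead, and oversized slots):

	#include <stdint.h>

	/* Simplified rate table: data[i] holds the time (in scheduler ticks)
	 * needed to send a packet of roughly i << cell_log bytes at the
	 * configured rate. */
	struct rate_table_stub {
		uint32_t data[256];
		int cell_log;
	};

	/* Length-to-time: index the precomputed table by packet size. */
	static uint32_t l2t(const struct rate_table_stub *rtab, unsigned int pktlen)
	{
		unsigned int slot = pktlen >> rtab->cell_log;

		if (slot > 255)
			slot = 255;               /* clamp oversized packets */
		return rtab->data[slot];
	}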
785 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit() local
791 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
813 if (cl->level > q->toplevel) in cbq_under_limit()
815 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
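cbq_under_limit() is the link-sharing test: a class whose undertime lies in the future is overlimit and must borrow from an ancestor, and if no ancestor at or below q->toplevel can lend, the class cannot send now. A condensed sketch of that walk, using illustrative field names and a simplified sentinel for PSCHED_PASTPERFECT:

	#include <stdint.h>
	#include <stddef.h>

	typedef int64_t sched_time_t;
	#define PASTPERFECT 0            /* "never been overlimit" sentinel (illustrative) */

	struct share_class {
		int level;
		sched_time_t undertime;      /* earliest time the class may send unaided */
		struct share_class *borrow;  /* ancestor it may borrow bandwidth from */
	};

	/* Return a class allowed to send at time `now`, or NULL if cl is blocked. */
	static struct share_class *
	under_limit(struct share_class *cl, sched_time_t now, int toplevel)
	{
		if (cl->undertime == PASTPERFECT || now >= cl->undertime)
			return cl;               /* within its own allocation */

		do {
			cl = cl->borrow;
			if (!cl || cl->level > toplevel)
				return NULL;         /* nobody left to borrow from */
		} while (cl->undertime != PASTPERFECT && now < cl->undertime);

		return cl;                   /* borrow from this ancestor */
	}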
824 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_prio() local
829 cl_tail = cl_prev = q->active[prio]; in cbq_dequeue_prio()
839 if (cl->q->q.qlen && in cbq_dequeue_prio()
852 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
862 q->tx_class = cl; in cbq_dequeue_prio()
863 q->tx_borrowed = borrow; in cbq_dequeue_prio()
873 q->tx_len = qdisc_pkt_len(skb); in cbq_dequeue_prio()
876 q->active[prio] = cl; in cbq_dequeue_prio()
883 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
898 q->active[prio] = NULL; in cbq_dequeue_prio()
899 q->activemask &= ~(1<<prio); in cbq_dequeue_prio()
900 if (cl->q->q.qlen) in cbq_dequeue_prio()
905 q->active[prio] = cl_tail; in cbq_dequeue_prio()
907 if (cl->q->q.qlen) in cbq_dequeue_prio()
919 q->active[prio] = cl_prev; in cbq_dequeue_prio()
927 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_1() local
931 activemask = q->activemask & 0xFF; in cbq_dequeue_1()
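cbq_dequeue_1() walks only the priorities that actually have active classes, peeling the lowest set bit off q->activemask on each pass. A minimal sketch of that bit-scan loop; dequeue_prio() is a hypothetical placeholder and the bit scan uses the GCC/Clang __builtin_ctz() rather than the kernel's helpers:

	#include <stdio.h>

	/* Hypothetical per-priority dequeue; returns nonzero if it produced a packet. */
	static int dequeue_prio(int prio)
	{
		printf("trying priority %d\n", prio);
		return 0;
	}

	/* Scan active priorities from the lowest set bit (highest priority) upward. */
	static int dequeue_one(unsigned int activemask)
	{
		activemask &= 0xFF;                       /* only 8 priorities are used */
		while (activemask) {
			int prio = __builtin_ctz(activemask); /* index of lowest set bit */

			activemask &= ~(1u << prio);
			if (dequeue_prio(prio))
				return 1;
		}
		return 0;
	}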
946 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue() local
951 if (q->tx_class) in cbq_dequeue()
952 cbq_update(q); in cbq_dequeue()
954 q->now = now; in cbq_dequeue()
957 q->wd_expires = 0; in cbq_dequeue()
962 sch->q.qlen--; in cbq_dequeue()
985 if (q->toplevel == TC_CBQ_MAXLEVEL && in cbq_dequeue()
986 q->link.undertime == PSCHED_PASTPERFECT) in cbq_dequeue()
989 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_dequeue()
990 q->link.undertime = PSCHED_PASTPERFECT; in cbq_dequeue()
997 if (sch->q.qlen) { in cbq_dequeue()
999 if (q->wd_expires) in cbq_dequeue()
1000 qdisc_watchdog_schedule(&q->watchdog, in cbq_dequeue()
1001 now + q->wd_expires); in cbq_dequeue()
1028 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) in cbq_normalize_quanta() argument
1033 if (q->quanta[prio] == 0) in cbq_normalize_quanta()
1036 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_normalize_quanta()
1037 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
1042 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
1043 q->quanta[prio]; in cbq_normalize_quanta()
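cbq_normalize_quanta() rescales each class's WRR quantum so the quanta at one priority stay proportional to the configured weights: quantum = weight * allot * nclasses[prio] / quanta[prio], where quanta[prio] is the running sum of weights at that priority (maintained by cbq_addprio()/cbq_rmprio() further down in this listing). A small worked sketch of that arithmetic:

	#include <stdio.h>

	/* Recompute one class's quantum from its weight, its allot, and the
	 * per-priority totals kept by the scheduler. Same formula as the listed
	 * code: quantum = weight * allot * nclasses / sum_of_weights. */
	static long normalize_quantum(long weight, long allot,
				      long nclasses, long weight_sum)
	{
		if (weight_sum == 0)
			return 0;                /* no classes at this priority yet */
		return weight * allot * nclasses / weight_sum;
	}

	int main(void)
	{
		/* Two classes at the same priority, allot 1500 bytes each:
		 * weights 3 and 1 give quanta of 2250 and 750 bytes per round,
		 * keeping the 3:1 ratio with an average quantum equal to allot. */
		long weight_sum = 3 + 1, nclasses = 2, allot = 1500;

		printf("%ld %ld\n",
		       normalize_quantum(3, allot, nclasses, weight_sum),
		       normalize_quantum(1, allot, nclasses, weight_sum));
		return 0;
	}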
1057 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap() local
1076 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_sync_defmap()
1079 hlist_for_each_entry(c, &q->clhash.hash[h], in cbq_sync_defmap()
1125 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_unlink_class() local
1127 qdisc_class_hash_remove(&q->clhash, &this->common); in cbq_unlink_class()
1152 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_link_class() local
1156 qdisc_class_hash_insert(&q->clhash, &this->common); in cbq_link_class()
1171 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_drop() local
1177 cl_head = q->active[prio]; in cbq_drop()
1183 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) { in cbq_drop()
1184 sch->q.qlen--; in cbq_drop()
1185 if (!cl->q->q.qlen) in cbq_drop()
1197 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_reset() local
1202 q->activemask = 0; in cbq_reset()
1203 q->pmask = 0; in cbq_reset()
1204 q->tx_class = NULL; in cbq_reset()
1205 q->tx_borrowed = NULL; in cbq_reset()
1206 qdisc_watchdog_cancel(&q->watchdog); in cbq_reset()
1207 hrtimer_cancel(&q->delay_timer); in cbq_reset()
1208 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_reset()
1209 q->now = psched_get_time(); in cbq_reset()
1212 q->active[prio] = NULL; in cbq_reset()
1214 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_reset()
1215 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1216 qdisc_reset(cl->q); in cbq_reset()
1225 sch->q.qlen = 0; in cbq_reset()
1250 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1252 q->nclasses[cl->priority]--; in cbq_rmprio()
1253 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1254 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1257 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1259 q->nclasses[cl->priority]++; in cbq_addprio()
1260 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1261 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1266 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr() local
1279 cbq_addprio(q, cl); in cbq_set_wrr()
1317 if (cl->q->handle) { in cbq_set_police()
1319 cl->q->reshape_fail = cbq_reshape_fail; in cbq_set_police()
1321 cl->q->reshape_fail = NULL; in cbq_set_police()
1345 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_init() local
1359 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) in cbq_init()
1362 err = qdisc_class_hash_init(&q->clhash); in cbq_init()
1366 q->link.refcnt = 1; in cbq_init()
1367 q->link.sibling = &q->link; in cbq_init()
1368 q->link.common.classid = sch->handle; in cbq_init()
1369 q->link.qdisc = sch; in cbq_init()
1370 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in cbq_init()
1372 if (!q->link.q) in cbq_init()
1373 q->link.q = &noop_qdisc; in cbq_init()
1375 q->link.priority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1376 q->link.priority2 = TC_CBQ_MAXPRIO - 1; in cbq_init()
1377 q->link.cpriority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1378 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; in cbq_init()
1379 q->link.overlimit = cbq_ovl_classic; in cbq_init()
1380 q->link.allot = psched_mtu(qdisc_dev(sch)); in cbq_init()
1381 q->link.quantum = q->link.allot; in cbq_init()
1382 q->link.weight = q->link.R_tab->rate.rate; in cbq_init()
1384 q->link.ewma_log = TC_CBQ_DEF_EWMA; in cbq_init()
1385 q->link.avpkt = q->link.allot/2; in cbq_init()
1386 q->link.minidle = -0x7FFFFFFF; in cbq_init()
1388 qdisc_watchdog_init(&q->watchdog, sch); in cbq_init()
1389 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); in cbq_init()
1390 q->delay_timer.function = cbq_undelay; in cbq_init()
1391 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_init()
1392 q->now = psched_get_time(); in cbq_init()
1394 cbq_link_class(&q->link); in cbq_init()
1397 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_init()
1399 cbq_addprio(q, &q->link); in cbq_init()
1403 qdisc_put_rtab(q->link.R_tab); in cbq_init()
1540 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump() local
1546 if (cbq_dump_attr(skb, &q->link) < 0) in cbq_dump()
1558 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_stats() local
1560 q->link.xstats.avgidle = q->link.avgidle; in cbq_dump_stats()
1561 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats)); in cbq_dump_stats()
1576 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1594 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_class_stats() local
1601 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1605 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) in cbq_dump_class_stats()
1628 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1636 return cl->q; in cbq_leaf()
1643 if (cl->q->q.qlen == 0) in cbq_qlen_notify()
1649 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_get() local
1650 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_get()
1661 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy_class() local
1666 qdisc_destroy(cl->q); in cbq_destroy_class()
1669 if (cl != &q->link) in cbq_destroy_class()
1675 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy() local
1681 q->rx_class = NULL; in cbq_destroy()
1688 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1689 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) in cbq_destroy()
1692 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1693 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1697 qdisc_class_hash_destroy(&q->clhash); in cbq_destroy()
1707 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_put() local
1710 if (q->rx_class == cl) in cbq_put()
1711 q->rx_class = NULL; in cbq_put()
1724 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_change_class() local
1781 cbq_rmprio(q, cl); in cbq_change_class()
1796 if (cl->q->q.qlen) in cbq_change_class()
1818 cbq_class_lookup(q, classid)) in cbq_change_class()
1825 if (++q->hgenerator >= 0x8000) in cbq_change_class()
1826 q->hgenerator = 1; in cbq_change_class()
1827 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) in cbq_change_class()
1833 classid = classid|q->hgenerator; in cbq_change_class()
1836 parent = &q->link; in cbq_change_class()
1838 parent = cbq_class_lookup(q, parentid); in cbq_change_class()
1862 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); in cbq_change_class()
1863 if (!cl->q) in cbq_change_class()
1864 cl->q = &noop_qdisc; in cbq_change_class()
1875 if (cl->tparent != &q->link) in cbq_change_class()
1882 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1884 cl->maxidle = q->link.maxidle; in cbq_change_class()
1886 cl->avpkt = q->link.avpkt; in cbq_change_class()
1898 qdisc_class_hash_grow(sch, &q->clhash); in cbq_change_class()
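When cbq_change_class() above creates a class without an explicit classid, the q->hgenerator lines show it probing minor numbers 1..0x7fff until one is not yet present in clhash. A compact sketch of that search; classid_in_use() is an illustrative stand-in for cbq_class_lookup(), not a kernel function:

	#include <stdint.h>
	#include <stdbool.h>

	/* Illustrative stand-in for cbq_class_lookup(): true if the id is taken.
	 * This sketch pretends every id is free. */
	static bool classid_in_use(uint32_t classid)
	{
		(void)classid;
		return false;
	}

	/* Pick a free minor id under `major`, advancing a persistent generator.
	 * Returns 0 if all 0x7fff minors are occupied. */
	static uint32_t alloc_classid(uint32_t major, uint32_t *hgenerator)
	{
		for (int i = 0; i < 0x8000; i++) {
			if (++*hgenerator >= 0x8000)
				*hgenerator = 1;
			if (!classid_in_use(major | *hgenerator))
				return major | *hgenerator;
		}
		return 0;
	}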
1910 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_delete() local
1914 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1919 qlen = cl->q->q.qlen; in cbq_delete()
1920 backlog = cl->q->qstats.backlog; in cbq_delete()
1921 qdisc_reset(cl->q); in cbq_delete()
1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog); in cbq_delete()
1927 if (q->tx_borrowed == cl) in cbq_delete()
1928 q->tx_borrowed = q->tx_class; in cbq_delete()
1929 if (q->tx_class == cl) { in cbq_delete()
1930 q->tx_class = NULL; in cbq_delete()
1931 q->tx_borrowed = NULL; in cbq_delete()
1934 if (q->rx_class == cl) in cbq_delete()
1935 q->rx_class = NULL; in cbq_delete()
1943 cbq_rmprio(q, cl); in cbq_delete()
1958 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_find_tcf() local
1962 cl = &q->link; in cbq_find_tcf()
1970 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_bind_filter() local
1972 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter()
1992 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_walk() local
1999 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_walk()
2000 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()