Lines Matching refs:sch (GRED qdisc, net/sched/sch_gred.c)

Each match below gives the source line number, the matching code, and the
enclosing function; "argument" marks definitions where sch appears as a
function parameter. After each function's matches, a short reconstruction
sketches the surrounding context.

91 static inline int gred_wred_mode_check(struct Qdisc *sch)  in gred_wred_mode_check()  argument
93 struct gred_sched *table = qdisc_priv(sch); in gred_wred_mode_check()
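The two matches above are from the WRED-mode consistency check. A hedged
reconstruction of the whole helper, assuming the usual duplicate-priority
scan (the loop body is inferred, not part of the matches):

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);     /* line 93 */
        int i, n;

        /* Flag WRED mode when two virtual queues share a priority. */
        for (i = 0; i < table->DPs; i++) {
                struct gred_sched_data *q = table->tab[i];

                if (!q)
                        continue;
                for (n = i + 1; n < table->DPs; n++)
                        if (table->tab[n] && table->tab[n]->prio == q->prio)
                                return 1;
        }
        return 0;
}
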
114 struct Qdisc *sch) in gred_backlog() argument
117 return sch->qstats.backlog; in gred_backlog()
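Lines 114-117 are the tail of gred_backlog(), which picks between the
shared and per-virtual-queue byte counts. A minimal sketch, assuming the
standard gred_wred_mode() test and a q->backlog fallback:

static inline u32 gred_backlog(struct gred_sched *table,
                               struct gred_sched_data *q,
                               struct Qdisc *sch)
{
        if (gred_wred_mode(table))              /* WRED: one shared queue */
                return sch->qstats.backlog;     /* line 117 */
        else
                return q->backlog;              /* per-VQ byte count */
}
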
164 static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, in gred_enqueue() argument
168 struct gred_sched *t = qdisc_priv(sch); in gred_enqueue()
181 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= in gred_enqueue()
182 sch->limit)) in gred_enqueue()
183 return qdisc_enqueue_tail(skb, sch); in gred_enqueue()
213 gred_backlog(t, q, sch)); in gred_enqueue()
226 qdisc_qstats_overlimit(sch); in gred_enqueue()
236 qdisc_qstats_overlimit(sch); in gred_enqueue()
246 if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) { in gred_enqueue()
248 return qdisc_enqueue_tail(skb, sch); in gred_enqueue()
253 return qdisc_drop(skb, sch, to_free); in gred_enqueue()
256 qdisc_drop(skb, sch, to_free); in gred_enqueue()
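gred_enqueue() consults sch at both ends of the admission path: an early
qdisc-wide byte-limit check and a later per-virtual-queue check. A hedged
sketch of that control flow (virtual-queue selection, RED averaging and
ECN marking are elided; only the matched checks are shown):

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
{
        struct gred_sched *t = qdisc_priv(sch);         /* line 168 */
        struct gred_sched_data *q = t->tab[t->def];     /* VQ choice assumed */

        /* Fast path: total backlog still fits the qdisc limit (181-183). */
        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
                return qdisc_enqueue_tail(skb, sch);

        /* ... RED mark/drop decisions go here; congestion paths bump
         * qdisc_qstats_overlimit(sch) (lines 226, 236) ... */

        /* Per-VQ limit (246-248); otherwise the packet is dropped (253). */
        if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit)
                return qdisc_enqueue_tail(skb, sch);

        return qdisc_drop(skb, sch, to_free);
}
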
260 static struct sk_buff *gred_dequeue(struct Qdisc *sch) in gred_dequeue() argument
263 struct gred_sched *t = qdisc_priv(sch); in gred_dequeue()
265 skb = qdisc_dequeue_head(sch); in gred_dequeue()
278 if (!sch->qstats.backlog) in gred_dequeue()
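On the dequeue side, sch supplies both the packet and the signal that the
queue has drained. A hedged sketch, assuming RED's idle period starts when
the shared backlog hits zero (per-VQ accounting and the wred_set field are
assumptions):

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct gred_sched *t = qdisc_priv(sch);         /* line 263 */
        struct sk_buff *skb = qdisc_dequeue_head(sch);  /* line 265 */

        if (skb) {
                /* ... per-virtual-queue backlog bookkeeping elided ... */
                if (!sch->qstats.backlog)               /* line 278 */
                        red_start_of_idle_period(&t->wred_set); /* assumed */
        }
        return skb;
}
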
292 static void gred_reset(struct Qdisc *sch) in gred_reset() argument
295 struct gred_sched *t = qdisc_priv(sch); in gred_reset()
297 qdisc_reset_queue(sch); in gred_reset()
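gred_reset() flushes the shared queue and restarts RED state on every
virtual queue. A short sketch, assuming red_restart() and a per-VQ backlog
counter for the elided lines:

static void gred_reset(struct Qdisc *sch)
{
        struct gred_sched *t = qdisc_priv(sch);         /* line 295 */
        int i;

        qdisc_reset_queue(sch);                         /* line 297 */

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

                if (!q)
                        continue;
                red_restart(&q->vars);  /* assumed: clear RED averaging */
                q->backlog = 0;
        }
}
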
310 static void gred_offload(struct Qdisc *sch, enum tc_gred_command command) in gred_offload() argument
312 struct gred_sched *table = qdisc_priv(sch); in gred_offload()
313 struct net_device *dev = qdisc_dev(sch); in gred_offload()
316 .handle = sch->handle, in gred_offload()
317 .parent = sch->parent, in gred_offload()
346 opt.set.qstats = &sch->qstats; in gred_offload()
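gred_offload() packages the qdisc identity from sch into a
tc_gred_qopt_offload request and hands it to the driver. A hedged sketch;
the capability checks and the per-DP parameter filling are assumptions:

static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
        struct net_device *dev = qdisc_dev(sch);        /* line 313 */
        struct tc_gred_qopt_offload opt = {
                .command = command,
                .handle  = sch->handle,                 /* line 316 */
                .parent  = sch->parent,                 /* line 317 */
        };

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        if (command == TC_GRED_REPLACE)
                /* Let the driver update queue stats in place (line 346). */
                opt.set.qstats = &sch->qstats;

        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
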
352 static int gred_offload_dump_stats(struct Qdisc *sch) in gred_offload_dump_stats() argument
354 struct gred_sched *table = qdisc_priv(sch); in gred_offload_dump_stats()
364 hw_stats->handle = sch->handle; in gred_offload_dump_stats()
365 hw_stats->parent = sch->parent; in gred_offload_dump_stats()
371 ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats); in gred_offload_dump_stats()
382 _bstats_update(&sch->bstats, in gred_offload_dump_stats()
385 sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; in gred_offload_dump_stats()
386 sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; in gred_offload_dump_stats()
387 sch->qstats.drops += hw_stats->stats.qstats[i].drops; in gred_offload_dump_stats()
388 sch->qstats.requeues += hw_stats->stats.qstats[i].requeues; in gred_offload_dump_stats()
389 sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; in gred_offload_dump_stats()
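gred_offload_dump_stats() pulls per-DP hardware counters and folds them
into the qdisc-wide stats hanging off sch. A hedged sketch; the stats
layout and the MAX_DPs bound are assumptions, and the byte/packet side is
left as a comment because its accessors vary across kernel versions:

static int gred_offload_dump_stats(struct Qdisc *sch)
{
        struct tc_gred_qopt_offload *hw_stats;
        int ret, i;

        hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
        if (!hw_stats)
                return -ENOMEM;

        hw_stats->command = TC_GRED_STATS;
        hw_stats->handle = sch->handle;                 /* line 364 */
        hw_stats->parent = sch->parent;                 /* line 365 */

        ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);

        for (i = 0; i < MAX_DPs; i++) {
                /* _bstats_update(&sch->bstats, ...) folds in the per-DP
                 * byte/packet counts (line 382). */
                sch->qstats.qlen       += hw_stats->stats.qstats[i].qlen;
                sch->qstats.backlog    += hw_stats->stats.qstats[i].backlog;
                sch->qstats.drops      += hw_stats->stats.qstats[i].drops;
                sch->qstats.requeues   += hw_stats->stats.qstats[i].requeues;
                sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
        }

        kfree(hw_stats);
        return ret;
}
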
401 static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps, in gred_change_table_def() argument
404 struct gred_sched *table = qdisc_priv(sch); in gred_change_table_def()
432 sch_tree_lock(sch); in gred_change_table_def()
443 sch_tree_unlock(sch); in gred_change_table_def()
448 if (gred_wred_mode_check(sch)) in gred_change_table_def()
470 gred_offload(sch, TC_GRED_REPLACE); in gred_change_table_def()
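gred_change_table_def() resizes the virtual-queue table under the qdisc
tree lock, re-evaluates WRED mode, and pushes the result to hardware. A
hedged sketch of that sequence; the tc_gred_sopt fields and the
gred_enable_wred_mode() helper are assumptions:

static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
                                 struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);     /* line 404 */
        struct tc_gred_sopt *sopt = nla_data(dps);

        sch_tree_lock(sch);                             /* line 432 */
        table->DPs = sopt->DPs;                         /* assumed fields */
        table->def = sopt->def_DP;
        /* ... virtual queues with index >= table->DPs are freed ... */
        sch_tree_unlock(sch);                           /* line 443 */

        /* Re-check whether the VQ priorities now trigger WRED mode. */
        if (gred_wred_mode_check(sch))                  /* line 448 */
                gred_enable_wred_mode(table);           /* assumed helper */

        gred_offload(sch, TC_GRED_REPLACE);             /* line 470 */
        return 0;
}
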
474 static inline int gred_change_vq(struct Qdisc *sch, int dp, in gred_change_vq() argument
480 struct gred_sched *table = qdisc_priv(sch); in gred_change_vq()
498 if (ctl->limit > sch->limit) in gred_change_vq()
499 q->limit = sch->limit; in gred_change_vq()
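In gred_change_vq(), sch caps the per-virtual-queue limit: no VQ may hold
more bytes than the qdisc as a whole. A minimal sketch of just that clamp
(the helper name and surrounding setup are hypothetical):

static void gred_vq_clamp_limit(struct Qdisc *sch, struct gred_sched_data *q,
                                struct tc_gred_qopt *ctl)
{
        if (ctl->limit > sch->limit)    /* lines 498-499 */
                q->limit = sch->limit;
        else
                q->limit = ctl->limit;
}
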
635 static int gred_change(struct Qdisc *sch, struct nlattr *opt, in gred_change() argument
638 struct gred_sched *table = qdisc_priv(sch); in gred_change()
656 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); in gred_change()
657 return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); in gred_change()
700 sch_tree_lock(sch); in gred_change()
702 err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc, in gred_change()
712 if (gred_wred_mode_check(sch)) in gred_change()
716 sch_tree_unlock(sch); in gred_change()
719 gred_offload(sch, TC_GRED_REPLACE); in gred_change()
723 sch_tree_unlock(sch); in gred_change()
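gred_change() shows two update paths: a bare TCA_GRED_LIMIT change that
resizes the qdisc and re-derives the table, and a full per-VQ update done
under the tree lock. A hedged sketch of that flow (netlink parsing and
the gred_change_vq() arguments are elided):

static int gred_change(struct Qdisc *sch, struct nlattr **tb,
                       struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);     /* line 638 */
        int err;

        /* Limit-only update (lines 656-657). */
        if (tb[TCA_GRED_LIMIT] && !tb[TCA_GRED_PARMS]) {
                sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
                return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
        }

        sch_tree_lock(sch);                             /* line 700 */
        err = 0;        /* gred_change_vq(sch, ...) elided (line 702) */
        if (gred_wred_mode_check(sch))                  /* line 712 */
                gred_enable_wred_mode(table);           /* assumed helper */
        sch_tree_unlock(sch);                           /* line 716 */

        if (!err)
                gred_offload(sch, TC_GRED_REPLACE);     /* line 719 */
        return err;
}
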
728 static int gred_init(struct Qdisc *sch, struct nlattr *opt, in gred_init() argument
749 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); in gred_init()
751 sch->limit = qdisc_dev(sch)->tx_queue_len in gred_init()
752 * psched_mtu(qdisc_dev(sch)); in gred_init()
754 return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); in gred_init()
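gred_init() gives sch->limit a device-derived default when userspace does
not supply one: tx_queue_len packets of one scheduler MTU each. A hedged
sketch of just that step (the helper name is hypothetical):

static int gred_init_limit(struct Qdisc *sch, struct nlattr **tb,
                           struct netlink_ext_ack *extack)
{
        if (tb[TCA_GRED_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);  /* line 749 */
        else    /* default: queue length in packets times the MTU */
                sch->limit = qdisc_dev(sch)->tx_queue_len *    /* 751-752 */
                             psched_mtu(qdisc_dev(sch));

        return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
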
757 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) in gred_dump() argument
759 struct gred_sched *table = qdisc_priv(sch); in gred_dump()
770 if (gred_offload_dump_stats(sch)) in gred_dump()
787 if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) in gred_dump()
813 opt.backlog = gred_backlog(table, q, sch); in gred_dump()
870 gred_backlog(table, q, sch))) in gred_dump()
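gred_dump() refreshes hardware stats through sch before reporting, then
emits the qdisc-wide limit and per-VQ backlogs computed with
gred_backlog(). A hedged outline (attribute nesting and the per-VQ loop
are elided; the error value is an assumption):

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        if (gred_offload_dump_stats(sch))               /* line 770 */
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) /* line 787 */
                goto nla_put_failure;

        /* Per-VQ loop elided: each entry reports
         * gred_backlog(table, q, sch) (lines 813, 870). */
        return skb->len;

nla_put_failure:
        return -EMSGSIZE;
}
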
900 static void gred_destroy(struct Qdisc *sch) in gred_destroy() argument
902 struct gred_sched *table = qdisc_priv(sch); in gred_destroy()
909 gred_offload(sch, TC_GRED_DESTROY); in gred_destroy()
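Finally, gred_destroy() tears down the virtual queues and notifies any
offloading driver. A short sketch; the gred_destroy_vq() per-VQ teardown
is an assumption about the elided lines:

static void gred_destroy(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);     /* line 902 */
        int i;

        for (i = 0; i < table->DPs; i++)
                gred_destroy_vq(table->tab[i]);         /* assumed helper */

        gred_offload(sch, TC_GRED_DESTROY);             /* line 909 */
}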