Lines matching refs:sch in net/sched/sch_gred.c (the Linux GRED queueing discipline)

92 static inline int gred_wred_mode_check(struct Qdisc *sch) in gred_wred_mode_check() argument
94 struct gred_sched *table = qdisc_priv(sch); in gred_wred_mode_check()
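
The check above scans the DP table for two virtual queues configured with the same priority; GRED switches into WRED mode (one shared average queue across equal-priority DPs) only when such a pair exists. A minimal userspace model of that O(n^2) scan, with the structs reduced to the fields the scan touches (the reduced types are assumptions; the field names mirror the kernel's):

    #define MAX_DPS 16

    struct gred_sched_data { int prio; };          /* one virtual queue (DP) */
    struct gred_sched {
        struct gred_sched_data *tab[MAX_DPS];      /* NULL = DP not set up */
        unsigned int DPs;                          /* table slots in use */
    };

    /* Return 1 if two configured DPs share a priority, i.e. the table
     * qualifies for WRED mode. */
    static int wred_mode_check(const struct gred_sched *table)
    {
        unsigned int i, n;

        for (i = 0; i < table->DPs; i++) {
            if (!table->tab[i])
                continue;
            for (n = i + 1; n < table->DPs; n++)
                if (table->tab[n] &&
                    table->tab[n]->prio == table->tab[i]->prio)
                    return 1;
        }
        return 0;
    }
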
115 struct Qdisc *sch) in gred_backlog() argument
118 return sch->qstats.backlog; in gred_backlog()
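
gred_backlog() selects which backlog feeds the RED average-queue math; line 118 above is the WRED-mode branch returning the shared, qdisc-wide counter. A sketch of the full selection, where the gred_wred_mode() test and the per-DP fallback are reconstructed from the kernel source rather than shown in this listing:

    static inline u32 gred_backlog(struct gred_sched *table,
                                   struct gred_sched_data *q,
                                   struct Qdisc *sch)
    {
        if (gred_wred_mode(table))
            return sch->qstats.backlog;    /* shared across all DPs */
        else
            return q->backlog;             /* this DP only */
    }
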
165 static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, in gred_enqueue() argument
169 struct gred_sched *t = qdisc_priv(sch); in gred_enqueue()
182 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= in gred_enqueue()
183 sch->limit)) in gred_enqueue()
184 return qdisc_enqueue_tail(skb, sch); in gred_enqueue()
214 gred_backlog(t, q, sch)); in gred_enqueue()
227 qdisc_qstats_overlimit(sch); in gred_enqueue()
237 qdisc_qstats_overlimit(sch); in gred_enqueue()
247 if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) { in gred_enqueue()
249 return qdisc_enqueue_tail(skb, sch); in gred_enqueue()
254 return qdisc_drop(skb, sch, to_free); in gred_enqueue()
257 qdisc_drop(skb, sch, to_free); in gred_enqueue()
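
Read together, the enqueue references describe three outcomes: a packet that maps to no DP and finds no default DP is passed through subject only to the qdisc-wide byte limit (lines 182-184); a classified packet first faces the per-DP RED verdict, computed over gred_backlog() (line 214), with a mark/drop decision bumping the overlimit counter (lines 227 and 237); a survivor is tail-enqueued only if the per-DP byte limit still has room (line 247), and otherwise takes one of the two drop exits (lines 254 and 257). A simplified userspace model of that decision order, with reduced types and a stubbed RED verdict (both assumptions, not the kernel's):

    struct pkt { unsigned int len; };

    struct vq {                        /* one GRED virtual queue (DP) */
        unsigned int backlog;          /* bytes queued via this DP */
        unsigned int limit;            /* per-DP byte limit */
    };

    struct gred {
        struct vq *def;                /* default DP, may be NULL */
        unsigned int backlog;          /* qdisc-wide bytes queued */
        unsigned int limit;            /* qdisc-wide limit (sch->limit) */
        unsigned long overlimits;
        unsigned long drops;
        unsigned long idle_periods;    /* used by the dequeue model below */
    };

    /* Stand-in for the kernel's red_action() verdict. */
    enum red_verdict { RED_DONT_MARK, RED_MARK };
    static enum red_verdict red_verdict_stub(const struct vq *q)
    {
        (void)q;
        return RED_DONT_MARK;
    }

    static int enqueue_model(struct gred *g, struct vq *q, struct pkt *p)
    {
        if (!q)
            q = g->def;

        if (!q) {
            /* Pass-through: no DP and no default DP configured,
             * so only the qdisc-wide byte limit applies. */
            if (g->backlog + p->len > g->limit)
                goto drop;
            g->backlog += p->len;
            return 0;
        }

        if (red_verdict_stub(q) == RED_MARK) {
            g->overlimits++;                 /* RED congestion verdict */
            goto drop;
        }

        if (q->backlog + p->len > q->limit)  /* per-DP byte limit */
            goto drop;

        q->backlog += p->len;
        g->backlog += p->len;
        return 0;

    drop:
        g->drops++;
        return -1;
    }
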
261 static struct sk_buff *gred_dequeue(struct Qdisc *sch) in gred_dequeue() argument
264 struct gred_sched *t = qdisc_priv(sch); in gred_dequeue()
266 skb = qdisc_dequeue_head(sch); in gred_dequeue()
279 if (!sch->qstats.backlog) in gred_dequeue()
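
On dequeue (line 266) the packet is charged back to its DP, and line 279's test on the shared backlog starts a RED idle period once the queue fully drains in WRED mode; per-DP mode tests the DP's own backlog instead. A sketch reusing the reduced types above, with the hypothetical idle_periods counter standing in for the kernel's red_start_of_idle_period():

    static struct pkt *dequeue_model(struct gred *g, struct vq *q,
                                     struct pkt *head, int wred_mode)
    {
        if (!head)
            return NULL;                    /* nothing queued */

        g->backlog -= head->len;            /* shared accounting */
        if (q) {
            q->backlog -= head->len;        /* per-DP accounting */
            if ((wred_mode ? g->backlog : q->backlog) == 0)
                g->idle_periods++;          /* idle period begins */
        }
        return head;
    }
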
293 static void gred_reset(struct Qdisc *sch) in gred_reset() argument
296 struct gred_sched *t = qdisc_priv(sch); in gred_reset()
298 qdisc_reset_queue(sch); in gred_reset()
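
gred_reset() flushes the shared queue (qdisc_reset_queue, line 298) and then, in a per-DP loop this listing elides, zeroes each DP's backlog and restarts its RED state. Continuing the model types (the RED restart is represented only by a comment):

    static void reset_model(struct gred *g, struct vq **tab, unsigned int dps)
    {
        unsigned int i;

        g->backlog = 0;                 /* qdisc_reset_queue() effect */
        for (i = 0; i < dps; i++) {
            if (!tab[i])
                continue;
            tab[i]->backlog = 0;        /* kernel also red_restart()s
                                         * the DP's RED variables here */
        }
    }
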
311 static void gred_offload(struct Qdisc *sch, enum tc_gred_command command) in gred_offload() argument
313 struct gred_sched *table = qdisc_priv(sch); in gred_offload()
314 struct net_device *dev = qdisc_dev(sch); in gred_offload()
322 opt->handle = sch->handle; in gred_offload()
323 opt->parent = sch->parent; in gred_offload()
348 opt->set.qstats = &sch->qstats; in gred_offload()
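
gred_offload() fills a struct tc_gred_qopt_offload identifying the qdisc (handle and parent, lines 322-323) and, for TC_GRED_REPLACE, pointing at the software qstats (line 348), then hands it to the driver through ndo_setup_tc. A hedged kernel-style sketch of the call shape; the per-DP parameter fill is elided, and the table->opt buffer and the offload-capability guard are reconstructed from the kernel source, not shown in this listing:

    static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
    {
        struct gred_sched *table = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_gred_qopt_offload *opt = table->opt;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
            return;                     /* no hardware offload available */

        memset(opt, 0, sizeof(*opt));
        opt->command = command;
        opt->handle = sch->handle;
        opt->parent = sch->parent;
        if (command == TC_GRED_REPLACE) {
            /* ... per-DP RED parameters filled here ... */
            opt->set.qstats = &sch->qstats;
        }

        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
    }
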
354 static int gred_offload_dump_stats(struct Qdisc *sch) in gred_offload_dump_stats() argument
356 struct gred_sched *table = qdisc_priv(sch); in gred_offload_dump_stats()
367 hw_stats->handle = sch->handle; in gred_offload_dump_stats()
368 hw_stats->parent = sch->parent; in gred_offload_dump_stats()
376 ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats); in gred_offload_dump_stats()
380 sch_tree_lock(sch); in gred_offload_dump_stats()
390 sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; in gred_offload_dump_stats()
391 sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; in gred_offload_dump_stats()
392 sch->qstats.drops += hw_stats->stats.qstats[i].drops; in gred_offload_dump_stats()
393 sch->qstats.requeues += hw_stats->stats.qstats[i].requeues; in gred_offload_dump_stats()
394 sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; in gred_offload_dump_stats()
396 _bstats_update(&sch->bstats, bytes, packets); in gred_offload_dump_stats()
397 sch_tree_unlock(sch); in gred_offload_dump_stats()
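
The stats-dump path asks the driver for per-DP hardware counters via qdisc_offload_dump_helper (line 376) and then, under sch_tree_lock (lines 380-397), folds every virtual queue's qstats into the qdisc-wide sch->qstats and feeds the byte/packet totals to _bstats_update. A userspace model of that aggregation loop (reduced types; field names mirror the kernel's):

    struct bstats { unsigned long long bytes, packets; };
    struct qstats { unsigned int qlen, backlog, drops, requeues, overlimits; };

    /* Fold per-DP hardware counters into the qdisc-wide totals, as
     * lines 390-396 do while the tree lock is held. */
    static void fold_hw_stats(struct qstats *qtotal, struct bstats *btotal,
                              const struct qstats *q, const struct bstats *b,
                              unsigned int n_dps)
    {
        unsigned long long bytes = 0, packets = 0;
        unsigned int i;

        for (i = 0; i < n_dps; i++) {
            qtotal->qlen       += q[i].qlen;
            qtotal->backlog    += q[i].backlog;
            qtotal->drops      += q[i].drops;
            qtotal->requeues   += q[i].requeues;
            qtotal->overlimits += q[i].overlimits;
            bytes              += b[i].bytes;
            packets            += b[i].packets;
        }
        btotal->bytes   += bytes;       /* _bstats_update() equivalent */
        btotal->packets += packets;
    }
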
408 static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps, in gred_change_table_def() argument
411 struct gred_sched *table = qdisc_priv(sch); in gred_change_table_def()
439 sch_tree_lock(sch); in gred_change_table_def()
450 sch_tree_unlock(sch); in gred_change_table_def()
455 if (gred_wred_mode_check(sch)) in gred_change_table_def()
477 gred_offload(sch, TC_GRED_REPLACE); in gred_change_table_def()
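
After resizing the DP table under sch_tree_lock (lines 439-450), the qdisc recomputes its operating mode: with the grio option set, RIO mode is enabled and WRED mode is additionally switched on only if the duplicate-priority check at line 455 fires; the new configuration is then pushed to hardware (line 477). A kernel-style sketch of that tail, with sopt->grio and the gred_*_mode helpers reconstructed from the kernel source (not shown in this listing):

    if (sopt->grio) {
        gred_enable_rio_mode(table);
        gred_disable_wred_mode(table);
        if (gred_wred_mode_check(sch))     /* any DPs sharing a prio? */
            gred_enable_wred_mode(table);
    } else {
        gred_disable_rio_mode(table);
        gred_disable_wred_mode(table);
    }

    gred_offload(sch, TC_GRED_REPLACE);    /* sync hardware state */
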
481 static inline int gred_change_vq(struct Qdisc *sch, int dp, in gred_change_vq() argument
487 struct gred_sched *table = qdisc_priv(sch); in gred_change_vq()
505 if (ctl->limit > sch->limit) in gred_change_vq()
506 q->limit = sch->limit; in gred_change_vq()
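
Lines 505-506 cap each virtual queue's byte limit at the qdisc-wide one, so no single DP can be configured to hold more than the whole qdisc; equivalently:

    /* Per-DP limit can never exceed the qdisc-wide limit. */
    q->limit = ctl->limit > sch->limit ? sch->limit : ctl->limit;
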
642 static int gred_change(struct Qdisc *sch, struct nlattr *opt, in gred_change() argument
645 struct gred_sched *table = qdisc_priv(sch); in gred_change()
660 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); in gred_change()
661 return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); in gred_change()
704 sch_tree_lock(sch); in gred_change()
706 err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc, in gred_change()
716 if (gred_wred_mode_check(sch)) in gred_change()
720 sch_tree_unlock(sch); in gred_change()
723 gred_offload(sch, TC_GRED_REPLACE); in gred_change()
727 sch_tree_unlock(sch); in gred_change()
732 static int gred_init(struct Qdisc *sch, struct nlattr *opt, in gred_init() argument
735 struct gred_sched *table = qdisc_priv(sch); in gred_init()
754 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); in gred_init()
756 sch->limit = qdisc_dev(sch)->tx_queue_len in gred_init()
757 * psched_mtu(qdisc_dev(sch)); in gred_init()
759 if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) { in gred_init()
765 return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); in gred_init()
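
At init, TCA_GRED_LIMIT sets the qdisc-wide byte limit directly (line 754); when the attribute is absent, the default is one transmit ring's worth of maximum-size packets, tx_queue_len * psched_mtu(dev), where psched_mtu() is the device MTU plus its hard header length. A worked example with illustrative numbers (not taken from the listing):

    /* Typical Ethernet device: tx_queue_len 1000, MTU 1500, 14-byte
     * link-layer header, so psched_mtu() yields 1514 bytes. */
    unsigned int tx_queue_len = 1000;
    unsigned int psched_mtu   = 1500 + 14;
    unsigned int limit = tx_queue_len * psched_mtu;   /* 1514000 bytes */
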
768 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) in gred_dump() argument
770 struct gred_sched *table = qdisc_priv(sch); in gred_dump()
781 if (gred_offload_dump_stats(sch)) in gred_dump()
798 if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) in gred_dump()
824 opt.backlog = gred_backlog(table, q, sch); in gred_dump()
880 gred_backlog(table, q, sch))) in gred_dump()
908 static void gred_destroy(struct Qdisc *sch) in gred_destroy() argument
910 struct gred_sched *table = qdisc_priv(sch); in gred_destroy()
916 gred_offload(sch, TC_GRED_DESTROY); in gred_destroy()