/net/core/

gen_stats.c
    282  __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,  in __gnet_stats_copy_queue_cpu() argument
    290  qstats->qlen = 0;  in __gnet_stats_copy_queue_cpu()
    291  qstats->backlog += qcpu->backlog;  in __gnet_stats_copy_queue_cpu()
    292  qstats->drops += qcpu->drops;  in __gnet_stats_copy_queue_cpu()
    293  qstats->requeues += qcpu->requeues;  in __gnet_stats_copy_queue_cpu()
    294  qstats->overlimits += qcpu->overlimits;  in __gnet_stats_copy_queue_cpu()
    298  void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,  in __gnet_stats_copy_queue() argument
    304  __gnet_stats_copy_queue_cpu(qstats, cpu);  in __gnet_stats_copy_queue()
    306  qstats->qlen = q->qlen;  in __gnet_stats_copy_queue()
    307  qstats->backlog = q->backlog;  in __gnet_stats_copy_queue()
    [all …]

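The gen_stats.c hits above are the core aggregation helper: per-CPU queue counters are summed into a single struct gnet_stats_queue, except qlen, which cannot be meaningfully summed across CPUs and is filled in by the caller. A minimal userspace sketch of that pattern, assuming a plain array in place of the kernel's per-CPU allocation and for_each_possible_cpu(); NR_CPUS and main() are illustrative, not kernel code:

#include <stdio.h>

/* Same five fields as the real struct gnet_stats_queue. */
struct gnet_stats_queue {
    unsigned int qlen;
    unsigned int backlog;
    unsigned int drops;
    unsigned int requeues;
    unsigned int overlimits;
};

#define NR_CPUS 4

static void copy_queue_cpu(struct gnet_stats_queue *qstats,
                           const struct gnet_stats_queue per_cpu[NR_CPUS])
{
    /* qlen is not a meaningful per-CPU sum; it is cleared here and the
     * caller supplies it, mirroring lines 290 and 306 above. */
    qstats->qlen = 0;
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        qstats->backlog    += per_cpu[cpu].backlog;
        qstats->drops      += per_cpu[cpu].drops;
        qstats->requeues   += per_cpu[cpu].requeues;
        qstats->overlimits += per_cpu[cpu].overlimits;
    }
}

int main(void)
{
    struct gnet_stats_queue per_cpu[NR_CPUS] = {
        { .backlog = 1500, .drops = 1 },
        { .backlog = 3000, .requeues = 2 },
    };
    struct gnet_stats_queue total = { 0 };

    copy_queue_cpu(&total, per_cpu);
    printf("backlog=%u drops=%u requeues=%u\n",
           total.backlog, total.drops, total.requeues);
    return 0;
}
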
/net/sched/

sch_skbprio.c
     35  struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];  member
     85  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    101  q->qstats[prio].drops++;  in skbprio_enqueue()
    102  q->qstats[prio].overlimits++;  in skbprio_enqueue()
    108  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    117  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
    118  q->qstats[lp].drops++;  in skbprio_enqueue()
    119  q->qstats[lp].overlimits++;  in skbprio_enqueue()
    152  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
    189  memset(&q->qstats, 0, sizeof(q->qstats));  in skbprio_init()
    [all …]

sch_mq.c
     45  .qstats = &sch->qstats,  in mq_offload_stats()
    137  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mq_dump()
    153  __gnet_stats_copy_queue(&sch->qstats,  in mq_dump()
    155  &qdisc->qstats, qlen);  in mq_dump()
    161  sch->qstats.qlen += qdisc->qstats.qlen;  in mq_dump()
    162  sch->qstats.backlog += qdisc->qstats.backlog;  in mq_dump()
    163  sch->qstats.drops += qdisc->qstats.drops;  in mq_dump()
    164  sch->qstats.requeues += qdisc->qstats.requeues;  in mq_dump()
    165  sch->qstats.overlimits += qdisc->qstats.overlimits;  in mq_dump()

sch_mqprio.c
    436  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mqprio_dump()
    453  __gnet_stats_copy_queue(&sch->qstats,  in mqprio_dump()
    455  &qdisc->qstats, qlen);  in mqprio_dump()
    461  sch->qstats.backlog += qdisc->qstats.backlog;  in mqprio_dump()
    462  sch->qstats.drops += qdisc->qstats.drops;  in mqprio_dump()
    463  sch->qstats.requeues += qdisc->qstats.requeues;  in mqprio_dump()
    464  sch->qstats.overlimits += qdisc->qstats.overlimits;  in mqprio_dump()
    558  struct gnet_stats_queue qstats = {0};  in mqprio_dump_class_stats() local
    583  __gnet_stats_copy_queue(&qstats,  in mqprio_dump_class_stats()
    585  &qdisc->qstats,  in mqprio_dump_class_stats()
    [all …]

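sch_mq.c and sch_mqprio.c show the classful side of the same idea: at dump time the parent zeroes its own qstats and folds in every child qdisc's counters, via __gnet_stats_copy_queue() when the child keeps per-CPU stats (lines 153/453) or plain additions otherwise (lines 161-165/461-464). A simplified model of that fold, assuming a stand-in struct qdisc reduced to what the loop needs; the child array and main() are illustrative:

#include <stdio.h>
#include <string.h>

struct gnet_stats_queue {
    unsigned int qlen, backlog, drops, requeues, overlimits;
};

/* Stand-in for the kernel's struct Qdisc. */
struct qdisc {
    struct gnet_stats_queue qstats;
};

static void mq_fold_stats(struct qdisc *sch, const struct qdisc *children, int n)
{
    /* Start from zero, as mq_dump() does with memset() at line 137. */
    memset(&sch->qstats, 0, sizeof(sch->qstats));

    for (int i = 0; i < n; i++) {
        const struct gnet_stats_queue *q = &children[i].qstats;

        sch->qstats.qlen       += q->qlen;
        sch->qstats.backlog    += q->backlog;
        sch->qstats.drops      += q->drops;
        sch->qstats.requeues   += q->requeues;
        sch->qstats.overlimits += q->overlimits;
    }
}

int main(void)
{
    struct qdisc txq[2] = {
        { .qstats = { .qlen = 3, .backlog = 4500 } },
        { .qstats = { .qlen = 1, .backlog = 1500, .drops = 2 } },
    };
    struct qdisc root;

    mq_fold_stats(&root, txq, 2);
    printf("qlen=%u backlog=%u drops=%u\n",
           root.qstats.qlen, root.qstats.backlog, root.qstats.drops);
    return 0;
}
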
sch_gred.c
    117  return sch->qstats.backlog;  in gred_backlog()
    181  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in gred_enqueue()
    278  if (!sch->qstats.backlog)  in gred_dequeue()
    346  opt.set.qstats = &sch->qstats;  in gred_offload()
    380  table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    385  sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;  in gred_offload_dump_stats()
    386  sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    387  sch->qstats.drops += hw_stats->stats.qstats[i].drops;  in gred_offload_dump_stats()
    388  sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;  in gred_offload_dump_stats()
    389  sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;  in gred_offload_dump_stats()

sch_red.c
     67  child->qstats.backlog);  in red_enqueue()
    101  sch->qstats.backlog += len;  in red_enqueue()
    145  sch->qstats.backlog = 0;  in red_reset()
    170  opt.set.qstats = &sch->qstats;  in red_offload()
    297  .stats.qstats = &sch->qstats,  in red_dump_offload_stats()

sch_prio.c
     88  sch->qstats.backlog += len;  in prio_enqueue()
    138  sch->qstats.backlog = 0;  in prio_reset()
    158  opt.replace_params.qstats = &sch->qstats;  in prio_offload()
    256  .qstats = &sch->qstats,  in prio_dump_offload()

sch_fifo.c
     21  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))  in bfifo_enqueue()
     44  prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
     50  qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()

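sch_fifo.c is the smallest consumer: bfifo uses qstats.backlog itself as the queue occupancy, admitting a packet only while backlog plus the packet's length stays within the byte limit (line 21). A self-contained sketch of that admission test and the matching backlog bookkeeping, assuming hypothetical byte_fifo_enqueue()/byte_fifo_dequeue() helpers rather than the kernel API:

#include <stdio.h>

struct gnet_stats_queue {
    unsigned int qlen, backlog, drops, requeues, overlimits;
};

struct byte_fifo {
    struct gnet_stats_queue qstats;
    unsigned int limit;    /* capacity in bytes, like bfifo's sch->limit */
};

/* Admit iff the new packet still fits under the byte limit; otherwise
 * account a drop. Mirrors the shape of the check at line 21 above. */
static int byte_fifo_enqueue(struct byte_fifo *q, unsigned int pkt_len)
{
    if (q->qstats.backlog + pkt_len <= q->limit) {
        q->qstats.backlog += pkt_len;
        q->qstats.qlen++;
        return 0;
    }
    q->qstats.drops++;
    return -1;
}

static void byte_fifo_dequeue(struct byte_fifo *q, unsigned int pkt_len)
{
    q->qstats.backlog -= pkt_len;
    q->qstats.qlen--;
}

int main(void)
{
    struct byte_fifo q = { .limit = 3000 };

    byte_fifo_enqueue(&q, 1500);
    byte_fifo_enqueue(&q, 1500);
    byte_fifo_enqueue(&q, 100);    /* would exceed 3000: counted as a drop */
    byte_fifo_dequeue(&q, 1500);
    printf("backlog=%u qlen=%u drops=%u\n",
           q.qstats.backlog, q.qstats.qlen, q.qstats.drops);
    return 0;
}

The same enqueue-adds/dequeue-subtracts discipline recurs throughout the entries below (sch_cake.c, sch_htb.c, sch_tbf.c and others).
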
sch_fq_codel.c
    179  sch->qstats.drops += i;  in fq_codel_drop()
    180  sch->qstats.backlog -= len;  in fq_codel_drop()
    221  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
    234  prev_backlog -= sch->qstats.backlog;  in fq_codel_enqueue()
    269  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    304  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,  in fq_codel_dequeue()
    352  sch->qstats.backlog = 0;  in fq_codel_reset()

sch_drr.c
     23  struct gnet_stats_queue qstats;  member
    272  gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)  in drr_dump_class_stats()
    361  cl->qstats.drops++;  in drr_enqueue()
    372  sch->qstats.backlog += len;  in drr_enqueue()
    446  sch->qstats.backlog = 0;  in drr_reset_qdisc()

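sch_drr.c (like sch_hfsc.c and sch_qfq.c below) embeds a second gnet_stats_queue per class: a drop is charged to the class whose child qdisc rejected the packet (line 361), while the byte backlog is tracked on the root (line 372). A sketch of that split, assuming hypothetical drr_class/drr_sched reductions of the real state:

#include <stdio.h>

struct gnet_stats_queue {
    unsigned int qlen, backlog, drops, requeues, overlimits;
};

/* Hypothetical reductions of drr's per-class and scheduler state. */
struct drr_class {
    struct gnet_stats_queue qstats;    /* per-class counters (line 23) */
};

struct drr_sched {
    struct gnet_stats_queue qstats;    /* root counters */
};

static void class_enqueue(struct drr_sched *sch, struct drr_class *cl,
                          unsigned int len, int child_accepted)
{
    if (!child_accepted) {
        /* A failed child enqueue is charged to the class, as at
         * drr_enqueue() line 361. */
        cl->qstats.drops++;
        return;
    }
    /* Accepted packets grow the root backlog (line 372). */
    sch->qstats.backlog += len;
}

int main(void)
{
    struct drr_sched sch = { 0 };
    struct drr_class gold = { 0 }, best_effort = { 0 };

    class_enqueue(&sch, &gold, 1500, 1);
    class_enqueue(&sch, &best_effort, 1500, 0);
    printf("root backlog=%u gold drops=%u best-effort drops=%u\n",
           sch.qstats.backlog, gold.qstats.drops, best_effort.qstats.drops);
    return 0;
}

This split is what lets drr_dump_class_stats() (line 272) report per-class drop counts while the root still answers global backlog queries.
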
sch_tbf.c
    199  sch->qstats.backlog += len;  in tbf_enqueue()
    274  sch->qstats.backlog = 0;  in tbf_reset()
    448  sch->qstats.backlog = q->qdisc->qstats.backlog;  in tbf_dump()

sch_hhf.c
    405  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
    414  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
    564  prev_backlog = sch->qstats.backlog;  in hhf_change()
    571  prev_backlog - sch->qstats.backlog);  in hhf_change()

sch_sfb.c
    407  sch->qstats.backlog += len;  in sfb_enqueue()
    459  sch->qstats.backlog = 0;  in sfb_reset()
    584  sch->qstats.backlog = q->qdisc->qstats.backlog;  in sfb_dump()

sch_codel.c
     75  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
     94  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()

sch_pie.c
    113  if (sch->qstats.backlog < 2 * mtu)  in drop_early()
    268  int qlen = sch->qstats.backlog; /* current queue size in bytes */  in pie_process_dequeue()
    333  u32 qlen = sch->qstats.backlog; /* queue size in bytes */  in calculate_probability()

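sch_codel.c and sch_pie.c above treat sch->qstats.backlog as the live queue length in bytes: codel_dequeue() takes a pointer to it (lines 94/304), and PIE's drop_early() refuses to drop while less than two MTUs of data are queued (line 113). A sketch of that PIE-style guard; the function name and the floating-point probability handling are illustrative, the kernel uses fixed-point arithmetic and its own PRNG rather than drand48():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the shape of the check at sch_pie.c line 113: with under two
 * MTUs queued, early drop is suppressed regardless of the computed
 * drop probability. */
static bool pie_would_drop_early(unsigned int backlog_bytes,
                                 unsigned int mtu, double drop_prob)
{
    if (backlog_bytes < 2 * mtu)
        return false;
    /* Illustrative Bernoulli trial against the drop probability. */
    return drand48() < drop_prob;
}

int main(void)
{
    printf("tiny queue:  %d\n", pie_would_drop_early(1000, 1500, 1.0));
    printf("large queue: %d\n", pie_would_drop_early(60000, 1500, 1.0));
    return 0;
}
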
sch_hfsc.c
    115  struct gnet_stats_queue qstats;  member
   1338  qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);  in hfsc_dump_class_stats()
   1346  gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)  in hfsc_dump_class_stats()
   1501  sch->qstats.backlog = 0;  in hfsc_reset_qdisc()
   1565  cl->qstats.drops++;  in hfsc_enqueue()
   1586  sch->qstats.backlog += len;  in hfsc_enqueue()

sch_plug.c
     92  if (likely(sch->qstats.backlog + skb->len <= q->limit)) {  in plug_enqueue()

sch_qfq.c
    135  struct gnet_stats_queue qstats;  member
   1237  cl->qstats.drops++;  in qfq_enqueue()
   1248  cl->qstats.drops++;  in qfq_enqueue()
   1256  sch->qstats.backlog += len;  in qfq_enqueue()
   1479  sch->qstats.backlog = 0;  in qfq_reset_qdisc()

sch_cake.c
   1504  sch->qstats.backlog -= len;  in cake_drop()
   1509  sch->qstats.drops++;  in cake_drop()
   1696  sch->qstats.overlimits++;  in cake_enqueue()
   1735  sch->qstats.backlog += slen;  in cake_enqueue()
   1751  sch->qstats.drops++;  in cake_enqueue()
   1770  sch->qstats.backlog += len;  in cake_enqueue()
   1889  sch->qstats.backlog -= len;  in cake_dequeue_one()
   1935  sch->qstats.overlimits++;  in cake_dequeue()

sch_htb.c
    616  sch->qstats.backlog += len;  in htb_enqueue()
    973  sch->qstats.backlog = 0;  in htb_reset()
   1052  sch->qstats.overlimits = q->overlimits;  in htb_dump()

act_mirred.c
    298  res->qstats = this_cpu_ptr(m->common.cpu_qstats);  in tcf_mirred_act()

sch_etf.c
    448  sch->qstats.backlog = 0;  in etf_reset()

sch_cbs.c
     98  sch->qstats.backlog += len;  in cbs_child_enqueue()

sch_choke.c
    325  sch->qstats.backlog = 0;  in choke_reset()

sch_generic.c
    160  q->qstats.requeues++;  in dev_requeue_skb()
    973  qdisc->qstats.backlog = 0;  in qdisc_reset()

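Finally, sch_generic.c covers the generic paths: a packet the driver could not send is pushed back onto the qdisc and counted in qstats.requeues (line 160), and qdisc_reset() discards all queued packets, so the byte backlog is zeroed with them (line 973). A small model of those two transitions, assuming the same stand-in struct qdisc and hypothetical requeue()/reset() helpers:

#include <stdio.h>

struct gnet_stats_queue {
    unsigned int qlen, backlog, drops, requeues, overlimits;
};

struct qdisc {
    struct gnet_stats_queue qstats;
};

/* The skb goes back on the queue and the event is counted, as in
 * dev_requeue_skb() at line 160. */
static void requeue(struct qdisc *q, unsigned int pkt_len)
{
    q->qstats.requeues++;
    q->qstats.backlog += pkt_len;
    q->qstats.qlen++;
}

/* qdisc_reset() (line 973) throws away all queued packets, so the
 * backlog and queue length are cleared while the event counters
 * (drops, requeues, overlimits) are left intact. */
static void reset(struct qdisc *q)
{
    q->qstats.backlog = 0;
    q->qstats.qlen = 0;
}

int main(void)
{
    struct qdisc q = { 0 };

    requeue(&q, 1500);
    printf("after requeue: requeues=%u backlog=%u\n",
           q.qstats.requeues, q.qstats.backlog);
    reset(&q);
    printf("after reset:   requeues=%u backlog=%u\n",
           q.qstats.requeues, q.qstats.backlog);
    return 0;
}
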