Searched refs:qstats (Results 1 – 25 of 46) sorted by relevance

/kernel/linux/linux-5.10/samples/bpf/
hbm.c:169 struct hbm_queue_stats qstats = {0}; in run_bpf_prog() local
190 qstats.rate = rate; in run_bpf_prog()
191 qstats.stats = stats_flag ? 1 : 0; in run_bpf_prog()
192 qstats.loopback = loopback_flag ? 1 : 0; in run_bpf_prog()
193 qstats.no_cn = no_cn_flag ? 1 : 0; in run_bpf_prog()
194 if (bpf_map_update_elem(map_fd, &key, &qstats, BPF_ANY)) { in run_bpf_prog()
217 bpf_map_lookup_elem(map_fd, &key, &qstats); in run_bpf_prog()
225 last_cg_tx_bytes = qstats.bytes_total; in run_bpf_prog()
246 bpf_map_lookup_elem(map_fd, &key, &qstats); in run_bpf_prog()
247 new_cg_tx_bytes = qstats.bytes_total; in run_bpf_prog()
[all …]
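
The hbm.c sample above keeps its queue statistics in a BPF map: user space seeds a struct hbm_queue_stats element with the configured flags via bpf_map_update_elem() and later reads the counters back with bpf_map_lookup_elem(). A minimal user-space sketch of that pattern follows; it assumes libbpf, uses a trimmed-down stats struct instead of the sample's real hbm_queue_stats, and open_stats_map() is a hypothetical helper standing in for however the map fd is actually obtained.

/* Sketch of the update/lookup pattern seen in samples/bpf/hbm.c (assumes libbpf).
 * open_stats_map() is hypothetical: it stands in for obtaining the fd of a
 * one-element map keyed by a u32. struct queue_stats is a trimmed stand-in,
 * not the sample's real hbm_queue_stats. */
#include <bpf/bpf.h>        /* bpf_map_update_elem(), bpf_map_lookup_elem() */
#include <linux/bpf.h>      /* BPF_ANY */
#include <stdint.h>
#include <stdio.h>

struct queue_stats {
    uint64_t rate;
    uint64_t stats;
    uint64_t loopback;
    uint64_t bytes_total;
};

int open_stats_map(void);   /* hypothetical helper, defined elsewhere */

int main(void)
{
    struct queue_stats qstats = {0};
    uint32_t key = 0;
    int map_fd = open_stats_map();

    /* Seed the element that the BPF program will keep updating. */
    qstats.rate = 1000;
    qstats.stats = 1;
    if (bpf_map_update_elem(map_fd, &key, &qstats, BPF_ANY)) {
        perror("bpf_map_update_elem");
        return 1;
    }

    /* ... run traffic for a while, then read the counters back ... */
    if (bpf_map_lookup_elem(map_fd, &key, &qstats) == 0)
        printf("tx bytes: %llu\n", (unsigned long long)qstats.bytes_total);
    return 0;
}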
/kernel/linux/linux-5.10/net/core/
gen_stats.c:286 __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, in __gnet_stats_copy_queue_cpu() argument
294 qstats->qlen = 0; in __gnet_stats_copy_queue_cpu()
295 qstats->backlog += qcpu->backlog; in __gnet_stats_copy_queue_cpu()
296 qstats->drops += qcpu->drops; in __gnet_stats_copy_queue_cpu()
297 qstats->requeues += qcpu->requeues; in __gnet_stats_copy_queue_cpu()
298 qstats->overlimits += qcpu->overlimits; in __gnet_stats_copy_queue_cpu()
302 void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, in __gnet_stats_copy_queue() argument
308 __gnet_stats_copy_queue_cpu(qstats, cpu); in __gnet_stats_copy_queue()
310 qstats->qlen = q->qlen; in __gnet_stats_copy_queue()
311 qstats->backlog = q->backlog; in __gnet_stats_copy_queue()
[all …]
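
The gen_stats.c hits show how per-CPU queue counters are folded into a single struct gnet_stats_queue: qlen is reset (the current queue length is taken from the qdisc itself, not from the per-CPU stats), while backlog, drops, requeues and overlimits are summed over all CPUs. A standalone sketch of that folding, using a simplified copy of the struct and a plain array in place of the kernel's per-CPU accessors, might look like this:

/* Simplified stand-in for struct gnet_stats_queue; not the kernel type. */
#include <stdint.h>
#include <stdio.h>

struct queue_stats {
    uint32_t qlen;
    uint32_t backlog;
    uint32_t drops;
    uint32_t requeues;
    uint32_t overlimits;
};

/* Fold per-CPU counters into one struct, in the spirit of
 * __gnet_stats_copy_queue_cpu(): qlen is reset (it comes from the qdisc,
 * not the per-CPU stats), everything else is accumulated. */
static void copy_queue_cpu(struct queue_stats *qstats,
                           const struct queue_stats *percpu, int ncpus)
{
    qstats->qlen = 0;
    for (int cpu = 0; cpu < ncpus; cpu++) {
        qstats->backlog    += percpu[cpu].backlog;
        qstats->drops      += percpu[cpu].drops;
        qstats->requeues   += percpu[cpu].requeues;
        qstats->overlimits += percpu[cpu].overlimits;
    }
}

int main(void)
{
    struct queue_stats percpu[2] = {
        { .backlog = 1500, .drops = 1 },
        { .backlog = 3000, .drops = 2, .overlimits = 1 },
    };
    struct queue_stats total = {0};

    copy_queue_cpu(&total, percpu, 2);
    printf("backlog=%u drops=%u overlimits=%u\n",
           total.backlog, total.drops, total.overlimits);
    return 0;
}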
/kernel/linux/linux-5.10/net/sched/
sch_skbprio.c:35 struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY]; member
85 q->qstats[prio].backlog += qdisc_pkt_len(skb); in skbprio_enqueue()
101 q->qstats[prio].drops++; in skbprio_enqueue()
102 q->qstats[prio].overlimits++; in skbprio_enqueue()
108 q->qstats[prio].backlog += qdisc_pkt_len(skb); in skbprio_enqueue()
117 q->qstats[lp].backlog -= qdisc_pkt_len(to_drop); in skbprio_enqueue()
118 q->qstats[lp].drops++; in skbprio_enqueue()
119 q->qstats[lp].overlimits++; in skbprio_enqueue()
152 q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); in skbprio_dequeue()
189 memset(&q->qstats, 0, sizeof(q->qstats)); in skbprio_init()
[all …]
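
sch_skbprio.c keeps one gnet_stats_queue per priority level: enqueue adds the packet length to that priority's backlog, and when the qdisc is full a packet is evicted from the lowest active priority, whose backlog is reduced and whose drops and overlimits counters are bumped. The same backlog/drops bookkeeping recurs in most of the sch_*.c hits below. A schematic user-space sketch of it, with a simplified stats struct and made-up packet lengths, is:

/* Schematic per-priority accounting in the style of sch_skbprio.c.
 * Simplified struct; not the kernel's types or locking. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PRIORITY 64

struct queue_stats {
    uint32_t backlog;
    uint32_t drops;
    uint32_t overlimits;
};

static struct queue_stats qstats[MAX_PRIORITY];

static void enqueue(int prio, uint32_t pkt_len)
{
    qstats[prio].backlog += pkt_len;        /* packet accepted */
}

static void drop_lowest(int lowest_prio, uint32_t pkt_len)
{
    /* Queue full: evict from the lowest priority to make room. */
    qstats[lowest_prio].backlog -= pkt_len;
    qstats[lowest_prio].drops++;
    qstats[lowest_prio].overlimits++;
}

int main(void)
{
    enqueue(3, 1500);
    enqueue(0, 500);
    drop_lowest(0, 500);
    printf("prio0: backlog=%u drops=%u overlimits=%u\n",
           qstats[0].backlog, qstats[0].drops, qstats[0].overlimits);
    return 0;
}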
sch_mq.c:45 .qstats = &sch->qstats, in mq_offload_stats()
160 memset(&sch->qstats, 0, sizeof(sch->qstats)); in mq_dump()
176 __gnet_stats_copy_queue(&sch->qstats, in mq_dump()
178 &qdisc->qstats, qlen); in mq_dump()
184 sch->qstats.qlen += qdisc->qstats.qlen; in mq_dump()
185 sch->qstats.backlog += qdisc->qstats.backlog; in mq_dump()
186 sch->qstats.drops += qdisc->qstats.drops; in mq_dump()
187 sch->qstats.requeues += qdisc->qstats.requeues; in mq_dump()
188 sch->qstats.overlimits += qdisc->qstats.overlimits; in mq_dump()
sch_mqprio.c:416 memset(&sch->qstats, 0, sizeof(sch->qstats)); in mqprio_dump()
433 __gnet_stats_copy_queue(&sch->qstats, in mqprio_dump()
435 &qdisc->qstats, qlen); in mqprio_dump()
441 sch->qstats.backlog += qdisc->qstats.backlog; in mqprio_dump()
442 sch->qstats.drops += qdisc->qstats.drops; in mqprio_dump()
443 sch->qstats.requeues += qdisc->qstats.requeues; in mqprio_dump()
444 sch->qstats.overlimits += qdisc->qstats.overlimits; in mqprio_dump()
538 struct gnet_stats_queue qstats = {0}; in mqprio_dump_class_stats() local
563 __gnet_stats_copy_queue(&qstats, in mqprio_dump_class_stats()
565 &qdisc->qstats, in mqprio_dump_class_stats()
[all …]
sch_gred.c:117 return sch->qstats.backlog; in gred_backlog()
181 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= in gred_enqueue()
278 if (!sch->qstats.backlog) in gred_dequeue()
346 opt.set.qstats = &sch->qstats; in gred_offload()
380 table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; in gred_offload_dump_stats()
385 sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; in gred_offload_dump_stats()
386 sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; in gred_offload_dump_stats()
387 sch->qstats.drops += hw_stats->stats.qstats[i].drops; in gred_offload_dump_stats()
388 sch->qstats.requeues += hw_stats->stats.qstats[i].requeues; in gred_offload_dump_stats()
389 sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; in gred_offload_dump_stats()
sch_fifo.c:22 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) in bfifo_enqueue()
45 prev_backlog = sch->qstats.backlog; in pfifo_tail_enqueue()
51 qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); in pfifo_tail_enqueue()
91 qopt.stats.qstats = &sch->qstats; in fifo_offload_dump()
sch_prio.c:88 sch->qstats.backlog += len; in prio_enqueue()
138 sch->qstats.backlog = 0; in prio_reset()
158 opt.replace_params.qstats = &sch->qstats; in prio_offload()
256 .qstats = &sch->qstats, in prio_dump_offload()
sch_tbf.c:155 qopt.replace_params.qstats = &sch->qstats; in tbf_offload_change()
182 qopt.stats.qstats = &sch->qstats; in tbf_offload_dump()
244 sch->qstats.backlog += len; in tbf_enqueue()
319 sch->qstats.backlog = 0; in tbf_reset()
sch_ets.c:45 struct gnet_stats_queue qstats; member
125 qopt.replace_params.qstats = &sch->qstats; in ets_offload_change()
185 qopt.stats.qstats = &sch->qstats; in ets_offload_dump()
439 cl->qstats.drops++; in ets_qdisc_enqueue()
450 sch->qstats.backlog += len; in ets_qdisc_enqueue()
725 sch->qstats.backlog = 0; in ets_qdisc_reset()
sch_red.c:79 child->qstats.backlog); in red_enqueue()
179 sch->qstats.backlog = 0; in red_reset()
205 opt.set.qstats = &sch->qstats; in red_offload()
402 .stats.qstats = &sch->qstats, in red_dump_offload_stats()
sch_drr.c:23 struct gnet_stats_queue qstats; member
272 gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0) in drr_dump_class_stats()
361 cl->qstats.drops++; in drr_enqueue()
372 sch->qstats.backlog += len; in drr_enqueue()
446 sch->qstats.backlog = 0; in drr_reset_qdisc()
sch_fq_codel.c:178 sch->qstats.drops += i; in fq_codel_drop()
179 sch->qstats.backlog -= len; in fq_codel_drop()
220 prev_backlog = sch->qstats.backlog; in fq_codel_enqueue()
233 prev_backlog -= sch->qstats.backlog; in fq_codel_enqueue()
268 sch->qstats.backlog -= qdisc_pkt_len(skb); in dequeue_func()
303 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, in fq_codel_dequeue()
351 sch->qstats.backlog = 0; in fq_codel_reset()
sch_hhf.c:405 prev_backlog = sch->qstats.backlog; in hhf_enqueue()
414 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); in hhf_enqueue()
564 prev_backlog = sch->qstats.backlog; in hhf_change()
571 prev_backlog - sch->qstats.backlog); in hhf_change()
sch_codel.c:75 sch->qstats.backlog -= qdisc_pkt_len(skb); in dequeue_func()
94 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars, in codel_qdisc_dequeue()
sch_sfb.c:407 sch->qstats.backlog += len; in sfb_enqueue()
459 sch->qstats.backlog = 0; in sfb_reset()
584 sch->qstats.backlog = q->qdisc->qstats.backlog; in sfb_dump()
sch_pie.c:96 if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog, in pie_qdisc_enqueue()
430 pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); in pie_timer()
525 pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog); in pie_qdisc_dequeue()
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_stats.h:431 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
446 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
459 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
464 qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
465 qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
466 + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
471 qstats_old->f = qstats->f; \
476 ADD_64(estats->s##_hi, qstats->s##_hi, \
477 estats->s##_lo, qstats->s##_lo); \
480 qstats_old->s##_hi_old = qstats->s##_hi; \
[all …]
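
The bnx2x macros keep 64-bit counters as separate _hi/_lo 32-bit words: the low words are added first, and a carry into the high word is detected by checking whether the new low word wrapped below the old one. A standalone sketch of that carry trick, without the token-pasting macros or the le32_to_cpu() conversion, is:

/* 64-bit counter kept as two 32-bit halves, extended with carry detection
 * in the spirit of the bnx2x ADD_EXTEND/UPDATE macros: if the new low word
 * is smaller than the old one, the addition wrapped and the high word gets
 * a carry. */
#include <stdint.h>
#include <stdio.h>

struct split_counter {
    uint32_t hi;
    uint32_t lo;
};

static void add_extend(struct split_counter *c, uint32_t diff)
{
    uint32_t old_lo = c->lo;

    c->lo += diff;
    c->hi += (c->lo < old_lo) ? 1 : 0;      /* carry on wraparound */
}

int main(void)
{
    struct split_counter bytes = { .hi = 0, .lo = 0xFFFFFF00u };

    add_extend(&bytes, 0x200);              /* forces a carry */
    printf("hi=%u lo=0x%x\n", (unsigned)bytes.hi, (unsigned)bytes.lo);
    printf("value=%llu\n",
           ((unsigned long long)bytes.hi << 32) | bytes.lo);
    return 0;
}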
bnx2x_stats.c:953 struct bnx2x_eth_q_stats *qstats = in bnx2x_storm_stats_update() local
977 qstats->total_bytes_received_hi = in bnx2x_storm_stats_update()
978 qstats->total_broadcast_bytes_received_hi; in bnx2x_storm_stats_update()
979 qstats->total_bytes_received_lo = in bnx2x_storm_stats_update()
980 qstats->total_broadcast_bytes_received_lo; in bnx2x_storm_stats_update()
982 ADD_64(qstats->total_bytes_received_hi, in bnx2x_storm_stats_update()
983 qstats->total_multicast_bytes_received_hi, in bnx2x_storm_stats_update()
984 qstats->total_bytes_received_lo, in bnx2x_storm_stats_update()
985 qstats->total_multicast_bytes_received_lo); in bnx2x_storm_stats_update()
987 ADD_64(qstats->total_bytes_received_hi, in bnx2x_storm_stats_update()
[all …]
/kernel/linux/linux-5.10/include/net/
sch_generic.h:105 struct gnet_stats_queue qstats; member
333 struct gnet_stats_queue *qstats; member
533 __u32 qlen = q->qstats.qlen; in qdisc_qlen_sum()
897 sch->qstats.backlog -= qdisc_pkt_len(skb); in qdisc_qstats_backlog_dec()
909 sch->qstats.backlog += qdisc_pkt_len(skb); in qdisc_qstats_backlog_inc()
935 sch->qstats.drops += count; in __qdisc_qstats_drop()
938 static inline void qstats_drop_inc(struct gnet_stats_queue *qstats) in qstats_drop_inc() argument
940 qstats->drops++; in qstats_drop_inc()
943 static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats) in qstats_overlimit_inc() argument
945 qstats->overlimits++; in qstats_overlimit_inc()
[all …]
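
sch_generic.h embeds a gnet_stats_queue in struct Qdisc and provides small inline helpers for updating it (qdisc_qstats_backlog_inc/dec, __qdisc_qstats_drop, qstats_drop_inc, qstats_overlimit_inc). The sketch below mirrors those helpers in plain user-space C, with a simplified stats struct standing in for the kernel types, to show how an enqueue/dequeue/drop path keeps the counters consistent:

/* User-space mirrors of the sch_generic.h helpers matched above.
 * struct queue_stats is a simplified stand-in for struct gnet_stats_queue. */
#include <stdint.h>
#include <stdio.h>

struct queue_stats {
    uint32_t qlen, backlog, drops, requeues, overlimits;
};

static inline void qstats_backlog_inc(struct queue_stats *q, uint32_t pkt_len)
{
    q->backlog += pkt_len;      /* mirrors qdisc_qstats_backlog_inc() */
}

static inline void qstats_backlog_dec(struct queue_stats *q, uint32_t pkt_len)
{
    q->backlog -= pkt_len;      /* mirrors qdisc_qstats_backlog_dec() */
}

static inline void qstats_drop_inc(struct queue_stats *q)
{
    q->drops++;
}

static inline void qstats_overlimit_inc(struct queue_stats *q)
{
    q->overlimits++;
}

int main(void)
{
    struct queue_stats q = {0};

    qstats_backlog_inc(&q, 1500);   /* packet enqueued */
    qstats_backlog_dec(&q, 1500);   /* packet dequeued */
    qstats_overlimit_inc(&q);       /* queue over its limit */
    qstats_drop_inc(&q);            /* packet dropped */
    printf("backlog=%u drops=%u overlimits=%u\n",
           q.backlog, q.drops, q.overlimits);
    return 0;
}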
pkt_cls.h:773 struct gnet_stats_queue *qstats; member
813 struct gnet_stats_queue *qstats; member
852 struct gnet_stats_queue *qstats; member
858 struct gnet_stats_queue qstats[MAX_DPs]; member
885 struct gnet_stats_queue *qstats; member
926 struct gnet_stats_queue *qstats; member
954 struct gnet_stats_queue *qstats; member
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
vnic_main.c:66 #define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \ argument
68 for (src64 = &qstats->x_grp.unicast, \
87 struct opa_vnic_stats *qstats = &vinfo->stats[i]; in hfi1_vnic_update_stats() local
92 stats->tx_drop_state += qstats->tx_drop_state; in hfi1_vnic_update_stats()
93 stats->tx_dlid_zero += qstats->tx_dlid_zero; in hfi1_vnic_update_stats()
95 SUM_GRP_COUNTERS(stats, qstats, tx_grp); in hfi1_vnic_update_stats()
102 struct opa_vnic_stats *qstats = &vinfo->stats[i]; in hfi1_vnic_update_stats() local
107 stats->rx_drop_state += qstats->rx_drop_state; in hfi1_vnic_update_stats()
108 stats->rx_oversize += qstats->rx_oversize; in hfi1_vnic_update_stats()
109 stats->rx_runt += qstats->rx_runt; in hfi1_vnic_update_stats()
[all …]
/kernel/linux/linux-5.10/Documentation/networking/
gen_stats.rst:26 struct gnet_stats_queue qstats;
33 mystruct->qstats.backlog += skb->pkt_len;
50 gnet_stats_copy_queue(&dump, &mystruct->qstats) < 0 ||
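
The gen_stats.rst excerpt documents the intended usage: embed a struct gnet_stats_queue in your own state, update it on the datapath, and hand it to gnet_stats_copy_queue() when dumping (in this 5.10 tree the helper also takes a per-CPU pointer and a queue length, as the sch_drr.c hit above shows). A small user-space analogue of that embed/update/snapshot pattern, with a simplified struct and a plain memcpy standing in for the netlink dump helper, is:

/* User-space analogue of the pattern documented in gen_stats.rst:
 * embed queue stats in your own structure, update them on the hot path,
 * and copy a snapshot out when asked for a dump. Simplified types only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct queue_stats {
    uint32_t qlen, backlog, drops, requeues, overlimits;
};

struct my_sched {               /* stands in for "mystruct" in the docs */
    struct queue_stats qstats;
    /* ... other scheduler state ... */
};

/* Hot path: account an enqueued packet, like the documented
 * "mystruct->qstats.backlog += skb->pkt_len" line. */
static void my_enqueue(struct my_sched *s, uint32_t pkt_len)
{
    s->qstats.qlen++;
    s->qstats.backlog += pkt_len;
}

/* Dump path: copy a snapshot out, the role gnet_stats_copy_queue()
 * plays in the kernel. */
static void my_dump(const struct my_sched *s, struct queue_stats *snap)
{
    memcpy(snap, &s->qstats, sizeof(*snap));
}

int main(void)
{
    struct my_sched s = {0};
    struct queue_stats snap;

    my_enqueue(&s, 1500);
    my_dump(&s, &snap);
    printf("qlen=%u backlog=%u\n", snap.qlen, snap.backlog);
    return 0;
}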
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/abm/
qdisc.c:462 struct gnet_stats_queue *qstats) in nfp_abm_stats_calculate() argument
466 qstats->qlen += new->backlog_pkts - old->backlog_pkts; in nfp_abm_stats_calculate()
467 qstats->backlog += new->backlog_bytes - old->backlog_bytes; in nfp_abm_stats_calculate()
468 qstats->overlimits += new->overlimits - old->overlimits; in nfp_abm_stats_calculate()
469 qstats->drops += new->drops - old->drops; in nfp_abm_stats_calculate()
503 &stats->bstats[i], &stats->qstats[i]); in nfp_abm_gred_stats()
654 stats->bstats, stats->qstats); in nfp_abm_red_stats()
811 stats->bstats, stats->qstats); in nfp_abm_mq_stats()
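
The NFP abm driver translates hardware counters into qstats by delta: the device reports cumulative values, and nfp_abm_stats_calculate() adds the difference since the previous snapshot into the software counters. A standalone sketch of that delta accumulation, with simplified stand-in structs, is:

/* Delta accounting in the spirit of nfp_abm_stats_calculate(): hardware
 * counters are cumulative, so only the change since the last snapshot is
 * folded into the software qstats. Simplified structs, not driver types. */
#include <stdint.h>
#include <stdio.h>

struct hw_counters {
    uint64_t backlog_pkts;
    uint64_t backlog_bytes;
    uint64_t overlimits;
    uint64_t drops;
};

struct queue_stats {
    uint32_t qlen, backlog, drops, overlimits;
};

static void stats_calculate(const struct hw_counters *new_c,
                            const struct hw_counters *old_c,
                            struct queue_stats *qstats)
{
    qstats->qlen       += new_c->backlog_pkts  - old_c->backlog_pkts;
    qstats->backlog    += new_c->backlog_bytes - old_c->backlog_bytes;
    qstats->overlimits += new_c->overlimits    - old_c->overlimits;
    qstats->drops      += new_c->drops         - old_c->drops;
}

int main(void)
{
    struct hw_counters old_c = { .drops = 10, .backlog_bytes = 4000 };
    struct hw_counters new_c = { .drops = 15, .backlog_bytes = 6000,
                                 .backlog_pkts = 2 };
    struct queue_stats qstats = {0};

    stats_calculate(&new_c, &old_c, &qstats);
    printf("drops=%u backlog=%u qlen=%u\n",
           qstats.drops, qstats.backlog, qstats.qlen);
    return 0;
}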
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/
spectrum_qdisc.c:329 stats_ptr->qstats->drops += drops; in mlxsw_sp_qdisc_update_stats()
330 stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog); in mlxsw_sp_qdisc_update_stats()
489 struct gnet_stats_queue *qstats) in mlxsw_sp_qdisc_leaf_unoffload() argument
495 qstats->backlog -= backlog; in mlxsw_sp_qdisc_leaf_unoffload()
506 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats); in mlxsw_sp_qdisc_red_unoffload()
550 stats_ptr->qstats->overlimits += overlimits; in mlxsw_sp_qdisc_get_red_stats()
748 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats); in mlxsw_sp_qdisc_tbf_unoffload()
1044 struct gnet_stats_queue *qstats) in __mlxsw_sp_qdisc_ets_unoffload() argument
1050 qstats->backlog -= backlog; in __mlxsw_sp_qdisc_ets_unoffload()
1061 p->qstats); in mlxsw_sp_qdisc_prio_unoffload()
[all …]
