/net/sched/
sch_gred.c
     39  u32 backlog;  /* bytes on the virtualQ */  (member)
    117  return sch->qstats.backlog;  in gred_backlog()
    119  return q->backlog;  in gred_backlog()
    181  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in gred_enqueue()
    247  q->backlog += qdisc_pkt_len(skb);  in gred_enqueue()
    275  q->backlog -= qdisc_pkt_len(skb);  in gred_dequeue()
    278  if (!sch->qstats.backlog)  in gred_dequeue()
    281  if (!q->backlog)  in gred_dequeue()
    306  q->backlog = 0;  in gred_reset()
    344  opt.set.tab[i].backlog = &q->backlog;  in gred_offload()
    [all …]
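These GRED hits show the invariant nearly every qdisc below repeats: enqueue adds qdisc_pkt_len(skb) to a byte counter, dequeue subtracts it, reset zeroes it. GRED keeps two counters in step, the shared sch->qstats.backlog and the per-virtual-queue q->backlog. A minimal sketch of that pairing (helper names invented here; the real accounting is inline in gred_enqueue()/gred_dequeue() alongside the RED logic):

	/* Sketch only: mirrors the byte accounting in gred_enqueue() and
	 * gred_dequeue(); the real functions also run the RED algorithm
	 * and handle drop/congestion paths. */
	static void gred_account_enqueue(struct Qdisc *sch,
					 struct gred_sched_data *q,
					 struct sk_buff *skb)
	{
		sch->qstats.backlog += qdisc_pkt_len(skb); /* whole qdisc, bytes */
		q->backlog          += qdisc_pkt_len(skb); /* this virtual queue */
	}

	static void gred_account_dequeue(struct Qdisc *sch,
					 struct gred_sched_data *q,
					 struct sk_buff *skb)
	{
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		q->backlog          -= qdisc_pkt_len(skb);
	}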
sch_sfq.c
    109  unsigned int backlog;  (member)
    308  slot->backlog -= len;  in sfq_drop()
    374  slot->backlog = 0; /* should already be 0 anyway... */  in sfq_enqueue()
    381  slot->backlog);  in sfq_enqueue()
    432  sch->qstats.backlog -= delta;  in sfq_enqueue()
    433  slot->backlog -= delta;  in sfq_enqueue()
    443  slot->backlog += qdisc_pkt_len(skb);  in sfq_enqueue()
    504  slot->backlog -= qdisc_pkt_len(skb);  in sfq_dequeue()
    556  slot->backlog = 0;  in sfq_rehash()
    587  slot->backlog);  in sfq_rehash()
    [all …]
sch_skbprio.c
     85  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    108  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    117  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
    152  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
    213  sch->qstats.backlog = 0;  in skbprio_reset()
sch_fifo.c
     21  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))  in bfifo_enqueue()
     44  prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
     50  qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()
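bfifo is the simplest consumer: the byte backlog doubles as the admission test. A sketch close to the actual bfifo_enqueue() (exact form varies by kernel version):

	static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
	{
		/* Admit only if the queued bytes plus this packet fit the limit. */
		if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
			return qdisc_enqueue_tail(skb, sch);

		return qdisc_drop(skb, sch, to_free);
	}

qdisc_enqueue_tail() increments sch->qstats.backlog itself on success, which is why no explicit += appears in the hit on line 21.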
sch_fq_codel.c
    180  sch->qstats.backlog -= len;  in fq_codel_drop()
    221  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
    234  prev_backlog -= sch->qstats.backlog;  in fq_codel_enqueue()
    269  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    304  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,  in fq_codel_dequeue()
    352  sch->qstats.backlog = 0;  in fq_codel_reset()
    652  qs.backlog = q->backlogs[idx];  in fq_codel_dump_class_stats()
sch_tbf.c
    199  sch->qstats.backlog += len;  in tbf_enqueue()
    274  sch->qstats.backlog = 0;  in tbf_reset()
    446  sch->qstats.backlog = q->qdisc->qstats.backlog;  in tbf_dump()
sch_hhf.c
    405  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
    414  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
    564  prev_backlog = sch->qstats.backlog;  in hhf_change()
    571  prev_backlog - sch->qstats.backlog);  in hhf_change()
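pfifo_tail, fq_codel and hhf share a second idiom, visible in their prev_backlog locals: when admitting one packet forces a drop from some other flow, the net change in packets and bytes must be reported to ancestor qdiscs, whose counters still include the evicted packet. Sketched from hhf_enqueue() (simplified; the real code instead returns NET_XMIT_CN when the drop hit the very flow that just enqueued):

	/* Sketch of the snapshot-and-report idiom in hhf_enqueue(). */
	unsigned int prev_backlog = sch->qstats.backlog;

	hhf_drop(sch, to_free);		/* evict one packet, heaviest bucket first */

	/* one packet and (prev_backlog - backlog) bytes vanished from under
	 * our ancestors; tell them */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);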
sch_codel.c
     75  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
     94  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()
sch_pie.c
    113  if (sch->qstats.backlog < 2 * mtu)  in drop_early()
    268  int qlen = sch->qstats.backlog;  /* current queue size in bytes */  in pie_process_dequeue()
    333  u32 qlen = sch->qstats.backlog;  /* queue size in bytes */  in calculate_probability()
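codel and PIE mostly read the counter rather than maintain it: the byte backlog is the occupancy signal the AQM acts on (codel_dequeue() even takes a pointer to it so the shared CoDel core can decrement it). A sketch of the PIE guard behind the line 113 hit (function name invented for this sketch):

	/* Sketch of the guard in sch_pie.c:drop_early(): never early-drop
	 * while fewer than two MTUs' worth of bytes are queued, since
	 * random drops on a near-empty queue only hurt throughput. */
	static bool pie_should_consider_drop(struct Qdisc *sch, u32 mtu)
	{
		if (sch->qstats.backlog < 2 * mtu)
			return false;	/* queue too short to bother */

		/* ... otherwise drop with PIE's computed probability ... */
		return true;
	}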
sch_mq.c
    162  sch->qstats.backlog += qdisc->qstats.backlog;  in mq_dump()
sch_sfb.c
    456  sch->qstats.backlog = 0;  in sfb_reset()
    581  sch->qstats.backlog = q->qdisc->qstats.backlog;  in sfb_dump()
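tbf, mq and sfb illustrate a third pattern: classful qdiscs that refresh sch->qstats.backlog from their children only when dumping statistics. A sketch of the mq case, which sums one child per TX queue (loop shape assumed from mq_dump(); locking and the other counters omitted):

	/* Sketch of the aggregation in mq_dump(): an mq qdisc owns one
	 * child qdisc per hardware TX queue and sums their counters
	 * on demand. */
	unsigned int ntx;

	sch->qstats.backlog = 0;
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct Qdisc *qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;

		sch->qstats.backlog += qdisc->qstats.backlog;
	}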
sch_plug.c
     92  if (likely(sch->qstats.backlog + skb->len <= q->limit)) {  in plug_enqueue()
sch_prio.c
     88  sch->qstats.backlog += len;  in prio_enqueue()
    138  sch->qstats.backlog = 0;  in prio_reset()
sch_red.c
     66  child->qstats.backlog);  in red_enqueue()
    143  sch->qstats.backlog = 0;  in red_reset()
sch_drr.c
    372  sch->qstats.backlog += len;  in drr_enqueue()
    446  sch->qstats.backlog = 0;  in drr_reset_qdisc()
sch_dsmark.c
    277  sch->qstats.backlog += len;  in dsmark_enqueue()
    410  sch->qstats.backlog = 0;  in dsmark_reset()
/net/core/
gen_stats.c
    291  qstats->backlog += qcpu->backlog;  in __gnet_stats_copy_queue_cpu()
    307  qstats->backlog = q->backlog;  in __gnet_stats_copy_queue()
    343  d->tc_stats.backlog = qstats.backlog;  in gnet_stats_copy_queue()
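gen_stats.c is where those qdisc counters reach userspace; line 291 is the fold for qdiscs that keep per-CPU queue stats. A sketch of that aggregation (function name shortened from __gnet_stats_copy_queue_cpu()):

	/* Fold each CPU's private gnet_stats_queue counters into the
	 * aggregate that is copied out to userspace. */
	static void copy_queue_cpu(struct gnet_stats_queue *qstats,
				   const struct gnet_stats_queue __percpu *cpu)
	{
		int i;

		for_each_possible_cpu(i) {
			const struct gnet_stats_queue *qcpu = per_cpu_ptr(cpu, i);

			qstats->qlen       += qcpu->qlen;	/* packets */
			qstats->backlog    += qcpu->backlog;	/* bytes */
			qstats->drops      += qcpu->drops;
			qstats->requeues   += qcpu->requeues;
			qstats->overlimits += qcpu->overlimits;
		}
	}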
/net/sunrpc/
stats.c
    154  ktime_t backlog, execute, now;  in rpc_count_iostats_metrics()  (local)
    170  backlog = 0;  in rpc_count_iostats_metrics()
    172  backlog = ktime_sub(req->rq_xtime, task->tk_start);  in rpc_count_iostats_metrics()
    173  op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);  in rpc_count_iostats_metrics()
    185  trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);  in rpc_count_iostats_metrics()
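In sunrpc the name changes meaning: backlog is a latency, the time a request waited between task creation (tk_start) and first transmission (rq_xtime), not a byte count. Reassembled from the hits above (the surrounding guard is paraphrased):

	/* Sketch of the backlog-latency accounting in
	 * rpc_count_iostats_metrics(). */
	ktime_t backlog = 0;

	if (ktime_to_ns(req->rq_xtime))		/* 0 means never transmitted */
		backlog = ktime_sub(req->rq_xtime, task->tk_start);

	op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);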
xprt.c
   1479  xprt->stat.bklog_u += xprt->backlog.qlen;  in xprt_request_transmit()
   1534  rpc_sleep_on(&xprt->backlog, task, NULL);  in xprt_add_backlog()
   1539  if (rpc_wake_up_next(&xprt->backlog) == NULL)  in xprt_wake_up_backlog()
   1551  rpc_sleep_on(&xprt->backlog, task, NULL);  in xprt_throttle_congested()
   1873  rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");  in xprt_init()
   1943  rpc_destroy_wait_queue(&xprt->backlog);  in xprt_destroy_cb()
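xprt.c gives backlog yet another shape, an rpc_wait_queue of tasks that could not get a transport request slot. Its lifecycle, collected from the hits above into one sequence (not a single real function; locking and congestion handling omitted):

	/* Sketch of the xprt backlog wait-queue lifecycle. */
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); /* xprt_init() */

	rpc_sleep_on(&xprt->backlog, task, NULL);  /* no free slot: park the task */

	rpc_wake_up_next(&xprt->backlog);          /* slot freed: wake one waiter */

	rpc_destroy_wait_queue(&xprt->backlog);    /* xprt_destroy_cb() */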
/net/tipc/
link.c
    164  } backlog[5];  (member)
    864  avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;  in link_prepare_wakeup()
    907  l->backlog[imp].len = 0;  in tipc_link_reset()
    908  l->backlog[imp].target_bskb = NULL;  in tipc_link_reset()
    964  if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {  in tipc_link_xmit()
   1003  tskb = &l->backlog[imp].target_bskb;  in tipc_link_xmit()
   1012  l->backlog[imp].len++;  in tipc_link_xmit()
   1017  l->backlog[imp].target_bskb = NULL;  in tipc_link_xmit()
   1018  l->backlog[imp].len += skb_queue_len(list);  in tipc_link_xmit()
   1045  l->backlog[imp].len--;  in tipc_link_advance_backlog()
    [all …]
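TIPC's link keeps one backlog lane per message importance level (hence the [5]), each with its own length, limit and current bundling target. The anonymous struct closed on line 164, reconstructed from the accesses above (field widths are an assumption):

	/* Sketch of the per-importance backlog lanes in struct tipc_link;
	 * exact field types in the real struct may differ. */
	struct {
		u16 len;			/* packets currently queued */
		u16 limit;			/* admission limit for this level */
		struct sk_buff *target_bskb;	/* bundle being filled, if any */
	} backlog[5];				/* one lane per importance level */

	/* Admission check from tipc_link_xmit(): */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		/* lane full: the real code parks the sender until
		 * link_prepare_wakeup() finds room again */
	}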
/net/rxrpc/
af_rxrpc.c
    208  static int rxrpc_listen(struct socket *sock, int backlog)  in rxrpc_listen()  (argument)
    215  _enter("%p,%d", rx, backlog);  in rxrpc_listen()
    228  if (backlog == INT_MAX)  in rxrpc_listen()
    229  backlog = max;  in rxrpc_listen()
    230  else if (backlog < 0 || backlog > max)  in rxrpc_listen()
    233  sk->sk_max_ack_backlog = backlog;  in rxrpc_listen()
    241  if (backlog == 0) {  in rxrpc_listen()
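rxrpc_listen() shows the kernel side of listen(2): validate the user-supplied backlog, treating INT_MAX as "the maximum allowed", then publish it in sk_max_ack_backlog. Assembled from the hits above (max is an rxrpc-internal ceiling not shown in the listing; the error value is assumed):

	/* Sketch of the backlog validation in rxrpc_listen(). */
	if (backlog == INT_MAX)
		backlog = max;		/* caller asked for "as many as possible" */
	else if (backlog < 0 || backlog > max)
		return -EINVAL;		/* assumed error; the listing elides it */

	sk->sk_max_ack_backlog = backlog;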
call_accept.c
    163  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_service_prealloc()
    169  rx->backlog = b;  in rxrpc_service_prealloc()
    187  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_discard_prealloc()
    193  rx->backlog = NULL;  in rxrpc_discard_prealloc()
    270  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_alloc_incoming_call()
    661  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_kernel_charge_accept()
/net/dccp/
proto.c
    241  static inline int dccp_listen_start(struct sock *sk, int backlog)  in dccp_listen_start()  (argument)
    249  return inet_csk_listen_start(sk, backlog);  in dccp_listen_start()
    931  int inet_dccp_listen(struct socket *sock, int backlog)  in inet_dccp_listen()  (argument)
    947  sk->sk_max_ack_backlog = backlog;  in inet_dccp_listen()
    956  err = dccp_listen_start(sk, backlog);  in inet_dccp_listen()
/net/atm/
svc.c
    282  static int svc_listen(struct socket *sock, int backlog)  in svc_listen()  (argument)
    315  sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;  in svc_listen()
/net/llc/
af_llc.c
    523  static int llc_ui_listen(struct socket *sock, int backlog)  in llc_ui_listen()  (argument)
    538  if (!(unsigned int)backlog)  /* BSDism */  in llc_ui_listen()
    539  backlog = 1;  in llc_ui_listen()
    540  sk->sk_max_ack_backlog = backlog;  in llc_ui_listen()
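dccp, atm and llc converge on the same last step: the per-protocol listen handler sanitizes the backlog (atm substitutes ATM_BACKLOG_DEFAULT for non-positive values, llc bumps 0 to 1 as a BSDism) and stores it in sk->sk_max_ack_backlog. The value originates in plain listen(2); a minimal, runnable userspace counterpart:

	/* The second argument to listen() is the backlog these handlers
	 * sanitize and store. Port 0 lets the kernel pick a free port;
	 * nothing here is protocol-specific. */
	#include <stdio.h>
	#include <unistd.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = 0,
			.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		};

		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return 1;

		/* 128 lands in sk_max_ack_backlog (capped by net.core.somaxconn) */
		if (listen(fd, 128) < 0) {
			perror("listen");
			return 1;
		}

		close(fd);
		return 0;
	}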