/net/sched/

sch_sfq.c
  195  int d = q->qs[x].qlen + SFQ_DEPTH;    in sfq_link()
  213  if (n == p && q->max_depth == q->qs[x].qlen + 1)    in sfq_dec()
  228  d = q->qs[x].qlen;    in sfq_inc()
  252  sch->q.qlen--;    in sfq_drop()
  268  sch->q.qlen--;    in sfq_drop()
  305  if (q->qs[x].qlen >= q->limit)    in sfq_enqueue()
  311  if (q->qs[x].qlen == 1) { /* The flow is new */    in sfq_enqueue()
  322  if (++sch->q.qlen <= q->limit) {    in sfq_enqueue()
  362  sch->q.qlen--;    in sfq_dequeue()
  366  if (q->qs[a].qlen == 0) {    in sfq_dequeue()
  [all …]

sch_drr.c
  54   unsigned int len = cl->qdisc->q.qlen;    in drr_purge_queue()
  242  if (cl->qdisc->q.qlen == 0)    in drr_qlen_notify()
  274  if (cl->qdisc->q.qlen)    in drr_dump_class_stats()
  369  if (cl->qdisc->q.qlen == 1) {    in drr_enqueue()
  379  sch->q.qlen++;    in drr_enqueue()
  402  if (cl->qdisc->q.qlen == 0)    in drr_dequeue()
  404  sch->q.qlen--;    in drr_dequeue()
  425  sch->q.qlen--;    in drr_drop()
  426  if (cl->qdisc->q.qlen == 0)    in drr_drop()
  456  if (cl->qdisc->q.qlen)    in drr_reset_qdisc()
  [all …]

sch_htb.c
  513  WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);    in htb_activate()
  546  if (q->direct_queue.qlen < q->direct_qlen) {    in htb_enqueue()
  574  sch->q.qlen++;    in htb_enqueue()
  791  if (unlikely(cl->un.leaf.q->q.qlen == 0)) {    in htb_dequeue_tree()
  836  if (!cl->un.leaf.q->q.qlen)    in htb_dequeue_tree()
  855  sch->q.qlen--;    in htb_dequeue()
  859  if (!sch->q.qlen)    in htb_dequeue()
  888  sch->q.qlen--;    in htb_dequeue()
  914  sch->q.qlen--;    in htb_drop()
  915  if (!cl->un.leaf.q->q.qlen)    in htb_drop()
  [all …]

sch_prio.c
  88   sch->q.qlen++;    in prio_enqueue()
  119  sch->q.qlen--;    in prio_dequeue()
  137  sch->q.qlen--;    in prio_drop()
  153  sch->q.qlen = 0;    in prio_reset()
  193  qdisc_tree_decrease_qlen(child, child->q.qlen);    in prio_tune()
  212  old->q.qlen);    in prio_tune()
  274  qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);    in prio_graft()

sch_multiq.c
  87   sch->q.qlen++;    in multiq_enqueue()
  115  sch->q.qlen--;    in multiq_dequeue()
  164  sch->q.qlen--;    in multiq_drop()
  181  sch->q.qlen = 0;    in multiq_reset()
  219  qdisc_tree_decrease_qlen(child, child->q.qlen);    in multiq_tune()
  241  old->q.qlen);    in multiq_tune()
  310  qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);    in multiq_graft()

sch_red.c
  99   sch->q.qlen++;    in red_enqueue()
  119  sch->q.qlen--;    in red_dequeue()
  143  sch->q.qlen--;    in red_drop()
  158  sch->q.qlen = 0;    in red_reset()
  204  qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);    in red_change()
  289  qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);    in red_graft()

sch_tbf.c
  136  sch->q.qlen++;    in tbf_enqueue()
  148  sch->q.qlen--;    in tbf_drop()
  189  sch->q.qlen--;    in tbf_dequeue()
  218  sch->q.qlen = 0;    in tbf_reset()
  287  qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);    in tbf_change()
  396  qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);    in tbf_graft()

sch_cbq.c
  391   sch->q.qlen++;    in cbq_enqueue()
  551   cl->qdisc->q.qlen--;    in cbq_ovl_drop()
  651   sch->q.qlen++;    in cbq_reshape_fail()
  682   if (cl->q->q.qlen > 1) {    in cbq_update_toplevel()
  838   if (cl->q->q.qlen &&    in cbq_dequeue_prio()
  882   if (cl->q->q.qlen == 0 || prio != cl->cpriority) {    in cbq_dequeue_prio()
  899   if (cl->q->q.qlen)    in cbq_dequeue_prio()
  906   if (cl->q->q.qlen)    in cbq_dequeue_prio()
  975   sch->q.qlen--;    in cbq_dequeue()
  1009  if (sch->q.qlen) {    in cbq_dequeue()
  [all …]

sch_dsmark.c
  73   qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);    in dsmark_graft()
  265  sch->q.qlen++;    in dsmark_enqueue()
  287  sch->q.qlen--;    in dsmark_dequeue()
  338  sch->q.qlen--;    in dsmark_drop()
  404  sch->q.qlen = 0;    in dsmark_reset()

sch_teql.c
  84   if (q->q.qlen < dev->tx_queue_len) {    in teql_enqueue()
  112  sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;    in teql_dequeue()
  136  sch->q.qlen = 0;    in teql_reset()

sch_hfsc.c
  774   if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)    in update_vf()
  902   unsigned int len = cl->qdisc->q.qlen;    in hfsc_purge_queue()
  1037  if (cl->qdisc->q.qlen != 0) {    in hfsc_change_class()
  1238  if (cl->qdisc->q.qlen == 0) {    in hfsc_qlen_notify()
  1369  cl->qstats.qlen = cl->qdisc->q.qlen;    in hfsc_dump_class_stats()
  1533  sch->q.qlen = 0;    in hfsc_reset_qdisc()
  1596  if (cl->qdisc->q.qlen == 1)    in hfsc_enqueue()
  1603  sch->q.qlen++;    in hfsc_enqueue()
  1618  if (sch->q.qlen == 0)    in hfsc_dequeue()
  1654  if (cl->qdisc->q.qlen != 0) {    in hfsc_dequeue()
  [all …]

sch_generic.c
  42   return q->q.qlen;    in qdisc_qlen()
  167  dev->name, ret, q->q.qlen);    in qdisc_restart()
  386  qdisc->q.qlen++;    in pfifo_fast_enqueue()
  400  qdisc->q.qlen--;    in pfifo_fast_dequeue()
  430  qdisc->q.qlen = 0;    in pfifo_fast_reset()

sch_netem.c
  241  sch->q.qlen++;    in netem_enqueue()
  258  sch->q.qlen--;    in netem_drop()
  284  sch->q.qlen--;    in netem_dequeue()
  299  sch->q.qlen = 0;    in netem_reset()

sch_atm.c
  457  sch->q.qlen++;    in atm_tc_enqueue()
  525  sch->q.qlen--;    in atm_tc_dequeue()
  580  sch->q.qlen = 0;    in atm_tc_reset()
  658  flow->qstats.qlen = flow->q->q.qlen;    in atm_tc_dump_class_stats()

em_meta.c
  368  dst->value = skb->sk->sk_receive_queue.qlen;    in META_COLLECTOR()
  374  dst->value = skb->sk->sk_write_queue.qlen;    in META_COLLECTOR()
  422  dst->value = skb->sk->sk_error_queue.qlen;    in META_COLLECTOR()

sch_api.c
  657   sch->q.qlen -= n;    in qdisc_tree_decrease_qlen()
  1159  q->qstats.qlen = q->q.qlen;    in tc_fill_qdisc()
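
Taken together, the /net/sched/ hits show one convention: every qdisc bumps sch->q.qlen on a successful enqueue, decrements it on dequeue and drop, zeroes it in its reset handler, and calls qdisc_tree_decrease_qlen() when an inner qdisc is swapped out so parent counters stay consistent. What follows is a minimal userspace sketch of that bookkeeping invariant only; the names (toy_queue, toy_enqueue, ...) are invented for illustration and none of this is kernel API.

/*
 * Minimal userspace sketch of the qlen bookkeeping seen in the qdisc
 * listings above: qlen goes up by one on a successful enqueue, down by
 * one on dequeue, and back to zero on reset.  Invented names throughout.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_node {
        int value;
        struct toy_node *next;
};

struct toy_queue {
        struct toy_node *head, *tail;
        unsigned int qlen;      /* mirrors sch->q.qlen in the listings */
        unsigned int limit;     /* mirrors the q->limit style bound */
};

static int toy_enqueue(struct toy_queue *q, int value)
{
        struct toy_node *n;

        if (q->qlen >= q->limit)        /* tail-drop when full */
                return -1;

        n = malloc(sizeof(*n));
        if (!n)
                return -1;
        n->value = value;
        n->next = NULL;
        if (q->tail)
                q->tail->next = n;
        else
                q->head = n;
        q->tail = n;
        q->qlen++;                      /* the "sch->q.qlen++" step */
        return 0;
}

static int toy_dequeue(struct toy_queue *q, int *value)
{
        struct toy_node *n = q->head;

        if (!n)
                return -1;
        q->head = n->next;
        if (!q->head)
                q->tail = NULL;
        *value = n->value;
        free(n);
        q->qlen--;                      /* the "sch->q.qlen--" step */
        return 0;
}

static void toy_reset(struct toy_queue *q)
{
        int v;

        while (toy_dequeue(q, &v) == 0)
                ;
        q->qlen = 0;                    /* the "sch->q.qlen = 0" step */
}

int main(void)
{
        struct toy_queue q = { .limit = 4 };

        for (int i = 0; i < 6; i++)
                toy_enqueue(&q, i);     /* the last two hit the limit */
        assert(q.qlen == 4);
        toy_reset(&q);
        assert(q.qlen == 0);
        printf("qlen accounting ok\n");
        return 0;
}

Keeping qlen as an explicit counter, rather than walking the list, is what lets readers such as qdisc_qlen() and the dump path (q->qstats.qlen = q->q.qlen in tc_fill_qdisc()) report the length in constant time.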

/net/ipv4/netfilter/

ipt_ULOG.c
  76   unsigned int qlen; /* number of nlmsgs in the skb */    member
  103  if (ub->qlen > 1)    in ulog_send()
  108  ub->qlen, nlgroupnum + 1);    in ulog_send()
  111  ub->qlen = 0;    in ulog_send()
  190  } else if (ub->qlen >= loginfo->qthreshold ||    in ipt_ulog_packet()
  201  pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen,    in ipt_ulog_packet()
  205  nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,    in ipt_ulog_packet()
  207  ub->qlen++;    in ipt_ulog_packet()
  252  if (ub->qlen > 1)    in ipt_ulog_packet()
  264  if (ub->qlen >= loginfo->qthreshold) {    in ipt_ulog_packet()

/net/bridge/netfilter/

ebt_ulog.c
  60   unsigned int qlen; /* number of nlmsgs in the skb */    member
  82   if (ub->qlen > 1)    in ulog_send()
  88   ub->qlen = 0;    in ulog_send()
  161  nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,    in ebt_ulog_packet()
  163  ub->qlen++;    in ebt_ulog_packet()
  171  if (ub->qlen == 1)    in ebt_ulog_packet()
  201  if (ub->qlen > 1)    in ebt_ulog_packet()
  206  if (ub->qlen >= uloginfo->qthreshold)    in ebt_ulog_packet()
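
In both ulog targets here, and in nfnetlink_log.c below, qlen plays a different role: it counts how many netlink log messages have been batched into the per-group buffer, and the buffer is flushed once qlen reaches the configured qthreshold, with a timer taking care of partially filled buffers. A rough userspace sketch of that batch-and-flush pattern follows; log_buffer, log_packet() and QTHRESHOLD are invented names, and the real netlink message construction (NLMSG_PUT and friends) is left out entirely.

/*
 * Sketch of the qlen/qthreshold batching pattern from ipt_ULOG.c,
 * ebt_ulog.c and nfnetlink_log.c: messages accumulate in a buffer,
 * qlen counts them, and the buffer is flushed once qlen reaches the
 * threshold.  Invented names; no netlink involved.
 */
#include <stdio.h>

#define QTHRESHOLD 5

struct log_buffer {
        unsigned int qlen;      /* number of messages batched so far */
};

static void flush(struct log_buffer *ub)
{
        if (ub->qlen > 1)
                printf("flushing batch of %u messages\n", ub->qlen);
        else
                printf("flushing single message\n");
        ub->qlen = 0;           /* mirrors "ub->qlen = 0" in ulog_send() */
}

static void log_packet(struct log_buffer *ub)
{
        ub->qlen++;             /* mirrors "ub->qlen++" per logged packet */
        if (ub->qlen >= QTHRESHOLD)
                flush(ub);      /* mirrors the qthreshold check */
}

int main(void)
{
        struct log_buffer ub = { 0 };

        for (int i = 0; i < 12; i++)
                log_packet(&ub);
        if (ub.qlen)            /* leftovers, normally flushed by a timer */
                flush(&ub);
        return 0;
}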

/net/core/

request_sock.c
  112  if (lopt->qlen != 0) {    in reqsk_queue_destroy()
  120  lopt->qlen--;    in reqsk_queue_destroy()
  126  WARN_ON(lopt->qlen != 0);    in reqsk_queue_destroy()

gen_stats.c
  163  d->tc_stats.qlen = q->qlen;    in gnet_stats_copy_queue()

/net/netfilter/

nfnetlink_log.c
  54   unsigned int qlen; /* number of nlmsgs in skb */    member
  321  if (inst->qlen > 1)    in __nfulnl_send()
  328  inst->qlen = 0;    in __nfulnl_send()
  631  inst->qlen++;    in nfulnl_log_packet()
  636  if (inst->qlen >= qthreshold)    in nfulnl_log_packet()
  908  inst->peer_pid, inst->qlen,    in seq_show()

/net/mac80211/

wme.c
  266  for (len = qdisc->q.qlen; len > 0; len--) {    in ieee80211_requeue()
  274  for (len = list.qlen; len > 0; len--) {    in ieee80211_requeue()
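
The two loops in ieee80211_requeue() read the queue length once and count down from that snapshot rather than looping until the queue is empty, so the pass stays bounded even when entries are put back onto the same list while it runs. The sketch below illustrates only that snapshot-and-drain idea with a toy array-backed list; every name is made up and nothing here resembles the mac80211 data structures.

/*
 * Snapshot-and-drain loop: take qlen once, process exactly that many
 * entries, and never revisit items re-added during the pass.
 */
#include <stdio.h>

#define CAP 16

struct toy_list {
        int item[CAP];
        unsigned int qlen;
};

static void push(struct toy_list *l, int v)
{
        if (l->qlen < CAP)
                l->item[l->qlen++] = v;
}

static int pop(struct toy_list *l)
{
        int v = l->item[0];

        l->qlen--;
        for (unsigned int i = 0; i < l->qlen; i++)
                l->item[i] = l->item[i + 1];
        return v;
}

int main(void)
{
        struct toy_list l = { .qlen = 0 };

        for (int v = 1; v <= 4; v++)
                push(&l, v);

        /* one full pass: each original entry is examined exactly once,
         * even though odd entries are pushed back onto the same list */
        for (unsigned int len = l.qlen; len > 0; len--) {
                int v = pop(&l);

                if (v & 1)
                        push(&l, v);    /* "requeue" */
                else
                        printf("dropped %d\n", v);
        }
        printf("remaining qlen = %u\n", l.qlen);
        return 0;
}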

/net/ipv4/

inet_connection_sock.c
  435  if (lopt == NULL || lopt->qlen == 0)    in inet_csk_reqsk_queue_prune()
  455  if (lopt->qlen>>(lopt->max_qlen_log-1)) {    in inet_csk_reqsk_queue_prune()
  459  if (lopt->qlen < young)    in inet_csk_reqsk_queue_prune()
  504  if (lopt->qlen)    in inet_csk_reqsk_queue_prune()
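
In the request-socket code, qlen is the number of connection requests sitting in the SYN backlog. The prune routine treats the backlog as getting full when lopt->qlen >> (lopt->max_qlen_log - 1) is nonzero, i.e. when qlen has reached half of the maximum 1 << max_qlen_log, and it then weighs qlen against the count of young requests to decide how aggressively to time entries out. A standalone check of that shift arithmetic, not kernel code:

/*
 * The half-full test used in inet_csk_reqsk_queue_prune(): with
 * max_qlen = 1 << max_qlen_log, the expression qlen >> (max_qlen_log - 1)
 * is nonzero exactly when qlen >= max_qlen / 2.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int max_qlen_log = 8;                  /* max_qlen = 256 */
        unsigned int max_qlen = 1U << max_qlen_log;

        for (unsigned int qlen = 0; qlen <= max_qlen; qlen++) {
                int half_full = qlen >> (max_qlen_log - 1);

                assert(!!half_full == (qlen >= max_qlen / 2));
        }
        printf("qlen >> (max_qlen_log - 1) flags a backlog of >= %u\n",
               max_qlen / 2);
        return 0;
}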

/net/sunrpc/

sched.c
  137  queue->qlen++;    in __rpc_add_wait_queue()
  168  queue->qlen--;    in __rpc_remove_wait_queue()
  200  queue->qlen = 0;    in __rpc_init_priority_wait_queue()

/net/atm/

clip.c
  399  if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)    in clip_start_xmit()
  488  rq->qlen = 0;    in clip_mkip()