Searched refs:q (Results 1 – 25 of 83) sorted by relevance

/net/sched/
sch_choke.c
80 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
82 return (q->tail - q->head) & q->tab_mask; in choke_len()
86 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
88 return q->flags & TC_RED_ECN; in use_ecn()
92 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
94 return q->flags & TC_RED_HARDDROP; in use_harddrop()
98 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
101 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
102 if (q->head == q->tail) in choke_zap_head_holes()
104 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
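
Aside on the choke_len()/choke_zap_head_holes() hits above: the CHOKe table size is a power of two (so tab_mask = size - 1), and occupancy and head advancement reduce to masked arithmetic that stays correct across wraparound. A minimal stand-alone sketch of that idiom (hypothetical names, not the kernel source):

    #include <stdio.h>

    #define TAB_SIZE 8                  /* must be a power of two */
    #define TAB_MASK (TAB_SIZE - 1)

    struct ring {
        unsigned int head, tail;        /* advanced with "& TAB_MASK", as choke does */
    };

    /* occupancy in the spirit of choke_len(): correct even after tail wraps */
    static unsigned int ring_len(const struct ring *r)
    {
        return (r->tail - r->head) & TAB_MASK;
    }

    int main(void)
    {
        struct ring r = { .head = 6, .tail = 1 };   /* tail wrapped past slot 7 */
        printf("%u\n", ring_len(&r));               /* prints 3: slots 6, 7, 0 */
        return 0;
    }
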
sch_sfq.c
152 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
155 return &q->slots[val].dep; in sfq_dep_head()
156 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
173 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
181 (__force u32)keys->ports, q->perturbation); in sfq_hash()
182 return hash & (q->divisor - 1); in sfq_hash()
188 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
195 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
198 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
201 return sfq_hash(q, skb) + 1; in sfq_classify()
[all …]
sch_pie.c
100 struct pie_sched_data *q = qdisc_priv(sch); in drop_early() local
102 u32 local_prob = q->vars.prob; in drop_early()
106 if (q->vars.burst_time > 0) in drop_early()
112 if ((q->vars.qdelay < q->params.target / 2) in drop_early()
113 && (q->vars.prob < MAX_PROB / 5)) in drop_early()
125 if (q->params.bytemode && packet_size <= mtu) in drop_early()
128 local_prob = q->vars.prob; in drop_early()
139 struct pie_sched_data *q = qdisc_priv(sch); in pie_qdisc_enqueue() local
143 q->stats.overlimit++; in pie_qdisc_enqueue()
149 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
[all …]
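
The drop_early() hits above show PIE's guards before a probabilistic drop: nothing is dropped while burst allowance remains, nor while queuing delay sits under half the target with a small drop probability. A rough user-space sketch of that decision, with MAX_PROB taken as the full u32 range and rand() standing in for the kernel's PRNG (both are assumptions of this sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_PROB 0xffffffffu        /* drop probability scaled onto a u32 (assumed) */

    static uint32_t random_u32(void)
    {
        /* crude stand-in for a proper PRNG, good enough for a sketch */
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    static bool drop_early_sketch(uint32_t prob, uint32_t qdelay_us,
                                  uint32_t target_us, uint32_t burst_left_us)
    {
        if (burst_left_us > 0)          /* still inside the burst allowance */
            return false;
        if (qdelay_us < target_us / 2 && prob < MAX_PROB / 5)
            return false;               /* delay comfortably below target */
        return random_u32() < prob;     /* drop with probability prob / MAX_PROB */
    }
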
sch_sfb.c
126 static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) in increment_one_qlen() argument
129 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in increment_one_qlen()
141 static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in increment_qlen() argument
147 increment_one_qlen(sfbhash, 0, q); in increment_qlen()
151 increment_one_qlen(sfbhash, 1, q); in increment_qlen()
155 struct sfb_sched_data *q) in decrement_one_qlen() argument
158 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in decrement_one_qlen()
170 static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in decrement_qlen() argument
176 decrement_one_qlen(sfbhash, 0, q); in decrement_qlen()
180 decrement_one_qlen(sfbhash, 1, q); in decrement_qlen()
[all …]
sch_red.c
49 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
51 return q->flags & TC_RED_ECN; in red_use_ecn()
54 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
56 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
61 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
62 struct Qdisc *child = q->qdisc; in red_enqueue()
65 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
66 &q->vars, in red_enqueue()
69 if (red_is_idling(&q->vars)) in red_enqueue()
70 red_end_of_idle_period(&q->vars); in red_enqueue()
[all …]
sch_netem.c
210 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
212 struct clgstate *clg = &q->clg; in loss_4state()
275 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
277 struct clgstate *clg = &q->clg; in loss_gilb_ell()
296 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
298 switch (q->loss_model) { in loss_event()
301 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
309 return loss_4state(q); in loss_event()
317 return loss_gilb_ell(q); in loss_event()
355 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) in packet_len_2_sched_time() argument
[all …]
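
loss_event() above dispatches between plain random loss, a four-state Markov model and a Gilbert-Elliott model. For orientation, here is the textbook two-state Gilbert-Elliott chain in stand-alone form; the field names and layout are illustrative and do not mirror netem's clgstate:

    #include <stdbool.h>
    #include <stdlib.h>

    enum { GOOD, BAD };

    struct gilbert {
        int state;
        double p;       /* P(GOOD -> BAD) */
        double r;       /* P(BAD  -> GOOD) */
        double k;       /* P(deliver | GOOD) */
        double h;       /* P(deliver | BAD) */
    };

    static double uniform01(void)
    {
        return (double)rand() / ((double)RAND_MAX + 1.0);
    }

    /* returns true if this packet is lost, then advances the chain */
    static bool gilbert_loss(struct gilbert *g)
    {
        bool lost = uniform01() >= (g->state == GOOD ? g->k : g->h);

        if (g->state == GOOD && uniform01() < g->p)
            g->state = BAD;
        else if (g->state == BAD && uniform01() < g->r)
            g->state = GOOD;
        return lost;
    }
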
sch_fq_codel.c
70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
79 (__force u32)keys.ports, q->perturbation); in fq_codel_hash()
81 return reciprocal_scale(hash, q->flows_cnt); in fq_codel_hash()
87 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
94 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
97 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
99 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
113 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
145 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
155 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
[all …]
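
fq_codel_hash() above maps the flow hash onto flows_cnt buckets with reciprocal_scale() rather than a modulo: a 32x32 -> 64-bit multiply followed by a shift. A stand-alone sketch of the same scaling:

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as the kernel's reciprocal_scale(): map a uniform 32-bit
     * value onto [0, n) with a multiply and a shift instead of "% n" */
    static uint32_t scale_to_buckets(uint32_t hash, uint32_t n)
    {
        return (uint32_t)(((uint64_t)hash * n) >> 32);
    }

    int main(void)
    {
        printf("%u\n", scale_to_buckets(0x9e3779b9u, 1024));    /* a bucket in [0, 1024) */
        return 0;
    }
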
sch_cbq.c
115 struct Qdisc *q; /* Elementary queueing discipline */ member
178 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
182 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
219 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
220 struct cbq_class *head = &q->link; in cbq_classify()
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
250 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
307 cl_tail = q->active[prio]; in cbq_activate_class()
308 q->active[prio] = cl; in cbq_activate_class()
[all …]
sch_multiq.c
42 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_classify() local
45 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify()
61 if (band >= q->bands) in multiq_classify()
62 return q->queues[0]; in multiq_classify()
64 return q->queues[band]; in multiq_classify()
86 sch->q.qlen++; in multiq_enqueue()
96 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_dequeue() local
101 for (band = 0; band < q->bands; band++) { in multiq_dequeue()
103 q->curband++; in multiq_dequeue()
104 if (q->curband >= q->bands) in multiq_dequeue()
[all …]
sch_fq.c
127 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
129 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
142 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
143 q->throttled_flows++; in fq_flow_set_throttled()
144 q->stat_throttled++; in fq_flow_set_throttled()
147 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
148 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
174 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
203 q->flows -= fcnt; in fq_gc()
204 q->inactive_flows -= fcnt; in fq_gc()
[all …]
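
fq_flow_set_throttled() above uses the usual kernel rb-tree insert walk: descend through a pointer-to-pointer while remembering the parent, then link the new node into the empty slot the walk ends on. The same walk on a plain (unbalanced) binary tree, rebalancing omitted, purely to illustrate the pointer pattern:

    #include <stddef.h>

    struct flow {
        unsigned long long time_next_packet;    /* sort key, as in sch_fq */
        struct flow *left, *right;
    };

    static void insert_throttled(struct flow **root, struct flow *f)
    {
        struct flow **p = root;

        while (*p) {
            struct flow *aux = *p;

            if (f->time_next_packet >= aux->time_next_packet)
                p = &aux->right;
            else
                p = &aux->left;
        }
        f->left = f->right = NULL;
        *p = f;                                 /* link at the slot the walk ended on */
    }
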
sch_tbf.c
160 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_segment() local
177 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
186 sch->q.qlen += nb; in tbf_segment()
195 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_enqueue() local
198 if (qdisc_pkt_len(skb) > q->max_size) { in tbf_enqueue()
199 if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) in tbf_enqueue()
203 ret = qdisc_enqueue(skb, q->qdisc); in tbf_enqueue()
210 sch->q.qlen++; in tbf_enqueue()
216 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_drop() local
219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { in tbf_drop()
[all …]
sch_hhf.c
179 static unsigned int skb_hash(const struct hhf_sched_data *q, in skb_hash() argument
191 (__force u32)keys.ports, q->perturbation); in skb_hash()
198 struct hhf_sched_data *q) in seek_list() argument
207 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
217 q->hh_flows_current_cnt--; in seek_list()
229 struct hhf_sched_data *q) in alloc_new_hh() argument
237 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in alloc_new_hh()
244 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
245 q->hh_flows_overlimit++; in alloc_new_hh()
253 q->hh_flows_current_cnt++; in alloc_new_hh()
[all …]
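
The eviction checks in seek_list()/alloc_new_hh() above compare "hit_timestamp + evict timeout" against the current time on a 32-bit tick counter, which only works with a wraparound-safe comparison. The familiar idiom, as a stand-alone helper:

    #include <stdint.h>

    /* wraparound-safe "a happens before b" on 32-bit tick counters: the
     * signed difference stays meaningful as long as the two timestamps
     * are less than 2^31 ticks apart */
    static int time_before32(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }
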
sch_qfq.c
212 struct qfq_sched *q = qdisc_priv(sch); in qfq_find_class() local
215 clc = qdisc_class_find(&q->clhash, classid); in qfq_find_class()
223 unsigned int len = cl->qdisc->q.qlen; in qfq_purge_queue()
266 static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_init_agg() argument
270 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); in qfq_init_agg()
276 static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, in qfq_find_agg() argument
281 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) in qfq_find_agg()
290 static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_update_agg() argument
295 if (new_num_classes == q->max_agg_classes) in qfq_update_agg()
299 new_num_classes == q->max_agg_classes - 1) /* agg no more full */ in qfq_update_agg()
[all …]
sch_gred.c
99 struct gred_sched_data *q = table->tab[i]; in gred_wred_mode_check() local
102 if (q == NULL) in gred_wred_mode_check()
106 if (table->tab[n] && table->tab[n]->prio == q->prio) in gred_wred_mode_check()
114 struct gred_sched_data *q, in gred_backlog() argument
120 return q->backlog; in gred_backlog()
129 struct gred_sched_data *q) in gred_load_wred_set() argument
131 q->vars.qavg = table->wred_set.qavg; in gred_load_wred_set()
132 q->vars.qidlestart = table->wred_set.qidlestart; in gred_load_wred_set()
136 struct gred_sched_data *q) in gred_store_wred_set() argument
138 table->wred_set.qavg = q->vars.qavg; in gred_store_wred_set()
[all …]
sch_prio.c
36 struct prio_sched_data *q = qdisc_priv(sch); in prio_classify() local
44 fl = rcu_dereference_bh(q->filter_list); in prio_classify()
58 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 if (band >= q->bands) in prio_classify()
64 return q->queues[q->prio2band[0]]; in prio_classify()
66 return q->queues[band]; in prio_classify()
88 sch->q.qlen++; in prio_enqueue()
98 struct prio_sched_data *q = qdisc_priv(sch); in prio_peek() local
101 for (prio = 0; prio < q->bands; prio++) { in prio_peek()
102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
[all …]
sch_codel.c
69 struct sk_buff *skb = __skb_dequeue(&sch->q); in dequeue()
77 struct codel_sched_data *q = qdisc_priv(sch); in codel_qdisc_dequeue() local
80 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); in codel_qdisc_dequeue()
85 if (q->stats.drop_count && sch->q.qlen) { in codel_qdisc_dequeue()
86 qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); in codel_qdisc_dequeue()
87 q->stats.drop_count = 0; in codel_qdisc_dequeue()
88 q->stats.drop_len = 0; in codel_qdisc_dequeue()
97 struct codel_sched_data *q; in codel_qdisc_enqueue() local
103 q = qdisc_priv(sch); in codel_qdisc_enqueue()
104 q->drop_overlimit++; in codel_qdisc_enqueue()
[all …]
sch_plug.c
91 struct plug_sched_data *q = qdisc_priv(sch); in plug_enqueue() local
93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
94 if (!q->unplug_indefinite) in plug_enqueue()
95 q->pkts_current_epoch++; in plug_enqueue()
104 struct plug_sched_data *q = qdisc_priv(sch); in plug_dequeue() local
109 if (!q->unplug_indefinite) { in plug_dequeue()
110 if (!q->pkts_to_release) { in plug_dequeue()
117 q->pkts_to_release--; in plug_dequeue()
125 struct plug_sched_data *q = qdisc_priv(sch); in plug_init() local
127 q->pkts_current_epoch = 0; in plug_init()
[all …]
sch_htb.c
131 struct Qdisc *q; member
183 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
186 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
209 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
228 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
295 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
[all …]
sch_api.c
42 struct nlmsghdr *n, struct Qdisc *q,
142 struct Qdisc_ops *q, **qp; in register_qdisc() local
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in register_qdisc()
147 if (!strcmp(qops->id, q->id)) in register_qdisc()
186 struct Qdisc_ops *q, **qp; in unregister_qdisc() local
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in unregister_qdisc()
191 if (q == qops) in unregister_qdisc()
193 if (q) { in unregister_qdisc()
194 *qp = q->next; in unregister_qdisc()
195 q->next = NULL; in unregister_qdisc()
[all …]
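
register_qdisc()/unregister_qdisc() above walk a singly linked list of Qdisc_ops through a pointer-to-pointer, so unlinking needs no separate "previous node" bookkeeping. A tiny generic version of that idiom (names invented for illustration):

    struct ops {
        const char *id;
        struct ops *next;
    };

    static struct ops *ops_base;        /* head of the registered list */

    static void ops_register(struct ops *o)
    {
        struct ops **qp;

        for (qp = &ops_base; *qp != NULL; qp = &(*qp)->next)
            ;                           /* walk to the terminating NULL slot */
        o->next = NULL;
        *qp = o;                        /* append, as register_qdisc() does */
    }

    static int ops_unregister(struct ops *o)
    {
        struct ops **qp, *q;

        for (qp = &ops_base; (q = *qp) != NULL; qp = &q->next)
            if (q == o) {
                *qp = q->next;          /* splice out without tracking "prev" */
                q->next = NULL;
                return 0;
            }
        return -1;                      /* not registered */
    }
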
sch_teql.c
70 struct sk_buff_head q; member
73 #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) argument
83 struct teql_sched_data *q = qdisc_priv(sch); in teql_enqueue() local
85 if (q->q.qlen < dev->tx_queue_len) { in teql_enqueue()
86 __skb_queue_tail(&q->q, skb); in teql_enqueue()
99 struct Qdisc *q; in teql_dequeue() local
101 skb = __skb_dequeue(&dat->q); in teql_dequeue()
103 q = rcu_dereference_bh(dat_queue->qdisc); in teql_dequeue()
106 struct net_device *m = qdisc_dev(q); in teql_dequeue()
114 sch->q.qlen = dat->q.qlen + q->q.qlen; in teql_dequeue()
[all …]
/net/sctp/
outqueue.c
56 static void sctp_check_transmitted(struct sctp_outq *q,
63 static void sctp_mark_missing(struct sctp_outq *q,
69 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
71 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
74 static inline void sctp_outq_head_data(struct sctp_outq *q, in sctp_outq_head_data() argument
77 list_add(&ch->list, &q->out_chunk_list); in sctp_outq_head_data()
78 q->out_qlen += ch->skb->len; in sctp_outq_head_data()
82 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) in sctp_outq_dequeue_data() argument
86 if (!list_empty(&q->out_chunk_list)) { in sctp_outq_dequeue_data()
87 struct list_head *entry = q->out_chunk_list.next; in sctp_outq_dequeue_data()
[all …]
/net/ipv4/
ip_fragment.c
70 struct inet_frag_queue q; member
111 static unsigned int ip4_hashfn(const struct inet_frag_queue *q) in ip4_hashfn() argument
115 ipq = container_of(q, struct ipq, q); in ip4_hashfn()
119 static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) in ip4_frag_match() argument
124 qp = container_of(q, struct ipq, q); in ip4_frag_match()
132 static void ip4_frag_init(struct inet_frag_queue *q, const void *a) in ip4_frag_init() argument
134 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init()
135 struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4, in ip4_frag_init()
151 static __inline__ void ip4_frag_free(struct inet_frag_queue *q) in ip4_frag_free() argument
155 qp = container_of(q, struct ipq, q); in ip4_frag_free()
[all …]
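
ip4_hashfn()/ip4_frag_match()/ip4_frag_init() above all recover the enclosing struct ipq from the embedded struct inet_frag_queue with container_of(), which is just pointer arithmetic over offsetof(). A minimal stand-alone version of the macro and its use (the surrounding struct here is an illustrative stand-in, not the kernel's struct ipq):

    #include <stddef.h>
    #include <stdio.h>

    /* minimal container_of(): step back from a member pointer to the
     * structure that embeds it */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct frag_queue { int dummy; };

    struct ipq_like {
        int user;
        struct frag_queue q;            /* embedded member */
    };

    int main(void)
    {
        struct ipq_like box = { .user = 42 };
        struct frag_queue *fq = &box.q;

        printf("%d\n", container_of(fq, struct ipq_like, q)->user);    /* prints 42 */
        return 0;
    }
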
inet_fragment.c
56 inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q) in inet_frag_hashfn() argument
58 return f->hashfn(q) & (INETFRAGS_HASHSZ - 1); in inet_frag_hashfn()
80 struct inet_frag_queue *q; in inet_frag_secret_rebuild() local
86 hlist_for_each_entry_safe(q, n, &hb->chain, list) { in inet_frag_secret_rebuild()
87 unsigned int hval = inet_frag_hashfn(f, q); in inet_frag_secret_rebuild()
92 hlist_del(&q->list); in inet_frag_secret_rebuild()
107 hlist_add_head(&q->list, &hb_dest->chain); in inet_frag_secret_rebuild()
120 static bool inet_fragq_should_evict(const struct inet_frag_queue *q) in inet_fragq_should_evict() argument
122 return q->net->low_thresh == 0 || in inet_fragq_should_evict()
123 frag_mem_limit(q->net) >= q->net->low_thresh; in inet_fragq_should_evict()
[all …]
/net/ieee802154/
reassembly.c
62 static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) in lowpan_hashfn() argument
66 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_hashfn()
70 static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) in lowpan_frag_match() argument
75 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_match()
81 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) in lowpan_frag_init() argument
86 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_init()
99 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); in lowpan_frag_expire()
100 net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); in lowpan_frag_expire()
102 spin_lock(&fq->q.lock); in lowpan_frag_expire()
104 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_expire()
[all …]
/net/ipv6/
reassembly.c
94 static unsigned int ip6_hashfn(const struct inet_frag_queue *q) in ip6_hashfn() argument
98 fq = container_of(q, struct frag_queue, q); in ip6_hashfn()
102 bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) in ip6_frag_match() argument
107 fq = container_of(q, struct frag_queue, q); in ip6_frag_match()
118 void ip6_frag_init(struct inet_frag_queue *q, const void *a) in ip6_frag_init() argument
120 struct frag_queue *fq = container_of(q, struct frag_queue, q); in ip6_frag_init()
136 spin_lock(&fq->q.lock); in ip6_expire_frag_queue()
138 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6_expire_frag_queue()
141 inet_frag_kill(&fq->q, frags); in ip6_expire_frag_queue()
150 if (fq->q.flags & INET_FRAG_EVICTED) in ip6_expire_frag_queue()
[all …]
