
Searched for references to "q" — results 1–25 of 79, sorted by relevance.


/net/sched/
sch_choke.c
   87: static unsigned int choke_len(const struct choke_sched_data *q)
   89:         return (q->tail - q->head) & q->tab_mask;
   93: static int use_ecn(const struct choke_sched_data *q)
   95:         return q->flags & TC_RED_ECN;
   99: static int use_harddrop(const struct choke_sched_data *q)
  101:         return q->flags & TC_RED_HARDDROP;
  105: static void choke_zap_head_holes(struct choke_sched_data *q)
  108:         q->head = (q->head + 1) & q->tab_mask;
  109:         if (q->head == q->tail)
  111:         } while (q->tab[q->head] == NULL);
  [all …]
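
Aside: choke_len() and choke_zap_head_holes() above show the power-of-two ring-buffer idiom, where occupancy and wraparound are computed by AND-masking instead of modulo. A minimal userspace sketch of just that idiom (illustrative names, not kernel code):

#include <assert.h>

/* Ring with a power-of-two slot count; tab_mask = slots - 1. */
struct ring {
	unsigned int head, tail, tab_mask;
};

static unsigned int ring_len(const struct ring *r)
{
	/* Unsigned subtraction wraps, so this stays correct even after
	 * tail has overflowed past head. */
	return (r->tail - r->head) & r->tab_mask;
}

static void ring_advance_head(struct ring *r)
{
	r->head = (r->head + 1) & r->tab_mask;
}

int main(void)
{
	struct ring r = { .head = 6, .tail = 2, .tab_mask = 7 }; /* 8 slots */
	assert(ring_len(&r) == 4);  /* (2 - 6) mod 8 */
	ring_advance_head(&r);
	assert(r.head == 7);
	return 0;
}
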
sch_sfq.c
  152: static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
  155:         return &q->slots[val].dep;
  156:         return &q->dep[val - SFQ_MAX_FLOWS];
  173: static unsigned int sfq_hash(const struct sfq_sched_data *q,
  181:                         (__force u32)keys->ports, q->perturbation);
  182:         return hash & (q->divisor - 1);
  188:         struct sfq_sched_data *q = qdisc_priv(sch);
  194:             TC_H_MIN(skb->priority) <= q->divisor)
  197:         if (!q->filter_list) {
  199:                 return sfq_hash(q, skb) + 1;
  [all …]
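
Aside: sfq_hash() above mixes the flow keys with q->perturbation before masking down to q->divisor buckets; re-seeding the perturbation periodically reshuffles flows so hash collisions cannot be engineered. A rough userspace sketch of the shape of it, with a toy mixer standing in for the kernel's jhash and made-up values throughout:

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit mixer, NOT the kernel's jhash. */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = (a * 2654435761u) ^ b ^ seed;
	h ^= h >> 16;
	return h * 2246822519u;
}

/* divisor must be a power of two for the mask to be a valid reduction. */
static unsigned int bucket(uint32_t addrs, uint32_t ports,
			   uint32_t perturbation, uint32_t divisor)
{
	return mix(addrs, ports, perturbation) & (divisor - 1);
}

int main(void)
{
	/* Same flow, two perturbation seeds: the bucket moves. */
	printf("%u\n", bucket(0x0a000001, (80u << 16) | 12345, 0xdeadbeef, 1024));
	printf("%u\n", bucket(0x0a000001, (80u << 16) | 12345, 0x1badb002, 1024));
	return 0;
}
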
sch_sfb.c
  126: static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
  129:         struct sfb_bucket *b = &q->bins[slot].bins[0][0];
  141: static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
  147:                 increment_one_qlen(sfbhash, 0, q);
  151:                 increment_one_qlen(sfbhash, 1, q);
  155:                                struct sfb_sched_data *q)
  158:         struct sfb_bucket *b = &q->bins[slot].bins[0][0];
  170: static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
  176:                 decrement_one_qlen(sfbhash, 0, q);
  180:                 decrement_one_qlen(sfbhash, 1, q);
  [all …]
sch_red.c
   49: static inline int red_use_ecn(struct red_sched_data *q)
   51:         return q->flags & TC_RED_ECN;
   54: static inline int red_use_harddrop(struct red_sched_data *q)
   56:         return q->flags & TC_RED_HARDDROP;
   61:         struct red_sched_data *q = qdisc_priv(sch);
   62:         struct Qdisc *child = q->qdisc;
   65:         q->vars.qavg = red_calc_qavg(&q->parms,
   66:                                      &q->vars,
   69:         if (red_is_idling(&q->vars))
   70:                 red_end_of_idle_period(&q->vars);
  [all …]
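
Aside: red_enqueue() above recomputes q->vars.qavg via red_calc_qavg(). RED tracks an exponentially weighted moving average of the backlog with weight 2^-Wlog; the kernel does this in fixed point, but the recurrence itself is simple. A floating-point sketch (parameters invented):

#include <stdio.h>

/* qavg <- qavg + (backlog - qavg) / 2^Wlog */
static double ewma_qavg(double qavg, double backlog, unsigned int Wlog)
{
	return qavg + (backlog - qavg) / (double)(1u << Wlog);
}

int main(void)
{
	double qavg = 0.0;

	/* Instantaneous backlog pinned at 100: qavg converges toward it. */
	for (int i = 0; i < 8; i++) {
		qavg = ewma_qavg(qavg, 100.0, 2);
		printf("step %d: qavg = %.2f\n", i, qavg);
	}
	return 0;
}
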
sch_netem.c
  171: static bool loss_4state(struct netem_sched_data *q)
  173:         struct clgstate *clg = &q->clg;
  235: static bool loss_gilb_ell(struct netem_sched_data *q)
  237:         struct clgstate *clg = &q->clg;
  255: static bool loss_event(struct netem_sched_data *q)
  257:         switch (q->loss_model) {
  260:                 return q->loss && q->loss >= get_crandom(&q->loss_cor);
  268:                 return loss_4state(q);
  276:                 return loss_gilb_ell(q);
  314: static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
  [all …]
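
Aside: loss_event() above dispatches between netem's loss models, including the two-state Gilbert-Elliott model behind loss_gilb_ell(). A self-contained sketch of such a model, with rand() in place of the kernel's PRNG and invented parameters:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Two-state loss model: GOOD and BAD states, each with its own
 * transition probability and per-packet loss probability. */
struct gilb_ell {
	bool bad;
	double p;  /* P(GOOD -> BAD) per packet */
	double r;  /* P(BAD -> GOOD) per packet */
	double k;  /* P(loss) while GOOD */
	double h;  /* P(loss) while BAD  */
};

static double u01(void) { return rand() / ((double)RAND_MAX + 1); }

static bool loss_event(struct gilb_ell *g)
{
	if (g->bad) {
		if (u01() < g->r)
			g->bad = false;
		return u01() < g->h;
	}
	if (u01() < g->p)
		g->bad = true;
	return u01() < g->k;
}

int main(void)
{
	struct gilb_ell g = { false, 0.01, 0.2, 0.001, 0.5 };
	int lost = 0;

	for (int i = 0; i < 100000; i++)
		lost += loss_event(&g);
	printf("lost %d of 100000\n", lost);
	return 0;
}
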
sch_cbq.c
  115:         struct Qdisc *q; /* Elementary queueing discipline */
  179: cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
  183:         clc = qdisc_class_find(&q->clhash, classid);
  220:         struct cbq_sched_data *q = qdisc_priv(sch);
  221:         struct cbq_class *head = &q->link;
  231:             (cl = cbq_class_lookup(q, prio)) != NULL)
  249:                 cl = cbq_class_lookup(q, res.classid);
  302:         struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
  306:         cl_tail = q->active[prio];
  307:         q->active[prio] = cl;
  [all …]
sch_tbf.c
  121:         struct tbf_sched_data *q = qdisc_priv(sch);
  124:         if (qdisc_pkt_len(skb) > q->max_size)
  127:         ret = qdisc_enqueue(skb, q->qdisc);
  134:         sch->q.qlen++;
  140:         struct tbf_sched_data *q = qdisc_priv(sch);
  143:         if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
  144:                 sch->q.qlen--;
  152:         struct tbf_sched_data *q = qdisc_priv(sch);
  155:         skb = q->qdisc->ops->peek(q->qdisc);
  164:                 toks = min_t(s64, now - q->t_c, q->buffer);
  [all …]
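
Aside: the toks = min_t(s64, now - q->t_c, q->buffer) line in tbf_dequeue() is the heart of the token bucket: credit accrues with elapsed time but is capped at the burst depth. A userspace sketch of that accounting, with time and tokens sharing one unit for brevity (the kernel converts packet length to time via the configured rate):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tbf {
	int64_t t_c;     /* time of last successful send */
	int64_t tokens;  /* unused credit carried over    */
	int64_t buffer;  /* maximum credit (burst depth)  */
};

static bool tbf_may_send(struct tbf *q, int64_t now, int64_t pkt_cost)
{
	int64_t toks = now - q->t_c;  /* credit earned since last send */

	if (toks > q->buffer)
		toks = q->buffer;
	toks += q->tokens;
	if (toks > q->buffer)
		toks = q->buffer;
	if (toks < pkt_cost)
		return false;  /* keep accruing; caller retries later */
	q->t_c = now;
	q->tokens = toks - pkt_cost;
	return true;
}

int main(void)
{
	struct tbf q = { .t_c = 0, .tokens = 0, .buffer = 10 };

	assert(!tbf_may_send(&q, 3, 5));  /* only 3 units earned so far */
	assert(tbf_may_send(&q, 6, 5));   /* 6 earned, 1 carried over */
	assert(q.tokens == 1);
	return 0;
}
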
sch_multiq.c
   43:         struct multiq_sched_data *q = qdisc_priv(sch);
   49:         err = tc_classify(skb, q->filter_list, &res);
   61:         if (band >= q->bands)
   62:                 return q->queues[0];
   64:         return q->queues[band];
   86:         sch->q.qlen++;
   96:         struct multiq_sched_data *q = qdisc_priv(sch);
  101:         for (band = 0; band < q->bands; band++) {
  103:                 q->curband++;
  104:                 if (q->curband >= q->bands)
  [all …]
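
Aside: the multiq_dequeue() fragment shows the usual round-robin scan: resume after the band served last time and wrap with a bounds check. A trivial sketch of that loop (invented helper, not the kernel's):

#include <assert.h>

/* Scan at most `bands` bands, starting after *curband; return the first
 * non-empty one and remember it, or -1 if all are empty. */
static int next_nonempty_band(const int *qlen, int bands, int *curband)
{
	for (int i = 0; i < bands; i++) {
		(*curband)++;
		if (*curband >= bands)
			*curband = 0;
		if (qlen[*curband] > 0)
			return *curband;
	}
	return -1;
}

int main(void)
{
	int qlen[3] = { 0, 0, 7 };
	int curband = 0;

	assert(next_nonempty_band(qlen, 3, &curband) == 2);
	assert(curband == 2);  /* next scan resumes after band 2 */
	return 0;
}
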
sch_fq_codel.c
   70: static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
   79:                         (__force u32)keys.ports, q->perturbation);
   80:         return ((u64)hash * q->flows_cnt) >> 32;
   86:         struct fq_codel_sched_data *q = qdisc_priv(sch);
   92:             TC_H_MIN(skb->priority) <= q->flows_cnt)
   95:         if (!q->filter_list)
   96:                 return fq_codel_hash(q, skb) + 1;
   99:         result = tc_classify(skb, q->filter_list, &res);
  110:         if (TC_H_MIN(res.classid) <= q->flows_cnt)
  142:         struct fq_codel_sched_data *q = qdisc_priv(sch);
  [all …]
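
Aside: fq_codel_hash() reduces a 32-bit hash to [0, flows_cnt) with ((u64)hash * q->flows_cnt) >> 32 rather than a modulo, so flows_cnt need not be a power of two. The trick in isolation:

#include <assert.h>
#include <stdint.h>

/* Multiplicative range reduction: maps a uniform 32-bit hash onto
 * [0, n) without a division instruction. */
static uint32_t reduce(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	assert(reduce(0x00000000u, 1000) == 0);
	assert(reduce(0xffffffffu, 1000) == 999);
	assert(reduce(0x80000000u, 1000) == 500);
	return 0;
}
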
sch_qfq.c
  212:         struct qfq_sched *q = qdisc_priv(sch);
  215:         clc = qdisc_class_find(&q->clhash, classid);
  223:         unsigned int len = cl->qdisc->q.qlen;
  265: static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
  269:         hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
  275: static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
  280:         hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
  289: static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
  294:         if (new_num_classes == q->max_agg_classes)
  298:             new_num_classes == q->max_agg_classes - 1) /* agg no more full */
  [all …]
sch_gred.c
   99:         struct gred_sched_data *q = table->tab[i];
  102:         if (q == NULL)
  106:         if (table->tab[n] && table->tab[n]->prio == q->prio)
  114:                                 struct gred_sched_data *q,
  120:         return q->backlog;
  129:                                struct gred_sched_data *q)
  131:         q->vars.qavg = table->wred_set.qavg;
  132:         q->vars.qidlestart = table->wred_set.qidlestart;
  136:                                 struct gred_sched_data *q)
  138:         table->wred_set.qavg = q->vars.qavg;
  [all …]
sch_htb.c
   95:         struct Qdisc *q;
  169:         struct htb_sched *q = qdisc_priv(sch);
  172:         clc = qdisc_class_find(&q->clhash, handle);
  195:         struct htb_sched *q = qdisc_priv(sch);
  212:         tcf = q->filter_list;
  238:         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
  276: static void htb_add_to_wait_tree(struct htb_sched *q,
  279:         struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
  281:         cl->pq_key = q->now + delay;
  282:         if (cl->pq_key == q->now)
  [all …]
sch_prio.c
   36:         struct prio_sched_data *q = qdisc_priv(sch);
   43:         err = tc_classify(skb, q->filter_list, &res);
   53:         if (!q->filter_list || err < 0) {
   56:                 return q->queues[q->prio2band[band & TC_PRIO_MAX]];
   61:         if (band >= q->bands)
   62:                 return q->queues[q->prio2band[0]];
   64:         return q->queues[band];
   86:         sch->q.qlen++;
   96:         struct prio_sched_data *q = qdisc_priv(sch);
   99:         for (prio = 0; prio < q->bands; prio++) {
  [all …]
sch_codel.c
   69:         struct sk_buff *skb = __skb_dequeue(&sch->q);
   77:         struct codel_sched_data *q = qdisc_priv(sch);
   80:         skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
   85:         if (q->stats.drop_count && sch->q.qlen) {
   86:                 qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
   87:                 q->stats.drop_count = 0;
   96:         struct codel_sched_data *q;
  102:                 q = qdisc_priv(sch);
  103:                 q->drop_overlimit++;
  116:         struct codel_sched_data *q = qdisc_priv(sch);
  [all …]
sch_plug.c
   91:         struct plug_sched_data *q = qdisc_priv(sch);
   93:         if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
   94:                 if (!q->unplug_indefinite)
   95:                         q->pkts_current_epoch++;
  104:         struct plug_sched_data *q = qdisc_priv(sch);
  109:         if (!q->unplug_indefinite) {
  110:                 if (!q->pkts_to_release) {
  117:                 q->pkts_to_release--;
  125:         struct plug_sched_data *q = qdisc_priv(sch);
  127:         q->pkts_current_epoch = 0;
  [all …]
sch_api.c
   42:                          struct nlmsghdr *n, struct Qdisc *q,
  142:         struct Qdisc_ops *q, **qp;
  146:         for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
  147:                 if (!strcmp(qops->id, q->id))
  186:         struct Qdisc_ops *q, **qp;
  190:         for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
  191:                 if (q == qops)
  193:         if (q) {
  194:                 *qp = q->next;
  195:                 q->next = NULL;
  [all …]
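
Aside: register_qdisc()/unregister_qdisc() above walk the qdisc_base list with a pointer-to-pointer cursor, so deleting the head needs no special case. The idiom reduced to a sketch (illustrative types):

#include <stddef.h>

struct ops {
	struct ops *next;
};

static struct ops *base;

/* qp always addresses the link that points at q, so unlinking is one
 * assignment whether q is the head or an interior node. */
static int unregister_ops(struct ops *target)
{
	struct ops *q, **qp;

	for (qp = &base; (q = *qp) != NULL; qp = &q->next)
		if (q == target)
			break;
	if (!q)
		return -1;
	*qp = q->next;
	q->next = NULL;
	return 0;
}

int main(void)
{
	struct ops b = { NULL }, a = { &b };

	base = &a;
	if (unregister_ops(&a) != 0 || base != &b)
		return 1;  /* head removal handled by the same path */
	return 0;
}
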
sch_teql.c
   70:         struct sk_buff_head q;
   73: #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
   83:         struct teql_sched_data *q = qdisc_priv(sch);
   85:         if (q->q.qlen < dev->tx_queue_len) {
   86:                 __skb_queue_tail(&q->q, skb);
  100:         skb = __skb_dequeue(&dat->q);
  111:         sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
  134:         skb_queue_purge(&dat->q);
  135:         sch->q.qlen = 0;
  141:         struct Qdisc *q, *prev;
  [all …]
sch_drr.c
   44:         struct drr_sched *q = qdisc_priv(sch);
   47:         clc = qdisc_class_find(&q->clhash, classid);
   55:         unsigned int len = cl->qdisc->q.qlen;
   68:         struct drr_sched *q = qdisc_priv(sch);
  130:         qdisc_class_hash_insert(&q->clhash, &cl->common);
  133:         qdisc_class_hash_grow(sch, &q->clhash);
  148:         struct drr_sched *q = qdisc_priv(sch);
  157:         qdisc_class_hash_remove(&q->clhash, &cl->common);
  189:         struct drr_sched *q = qdisc_priv(sch);
  194:         return &q->filter_list;
  [all …]
/net/sctp/
outqueue.c
   63: static void sctp_check_transmitted(struct sctp_outq *q,
   70: static void sctp_mark_missing(struct sctp_outq *q,
   76: static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
   78: static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
   81: static inline void sctp_outq_head_data(struct sctp_outq *q,
   84:         list_add(&ch->list, &q->out_chunk_list);
   85:         q->out_qlen += ch->skb->len;
   89: static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
   93:         if (!list_empty(&q->out_chunk_list)) {
   94:                 struct list_head *entry = q->out_chunk_list.next;
  [all …]
/net/ipv4/
inet_fragment.c
   61:         struct inet_frag_queue *q;
   65:         hlist_for_each_entry_safe(q, n, &hb->chain, list) {
   66:                 unsigned int hval = f->hashfn(q);
   71:                         hlist_del(&q->list);
   75:                         hlist_add_head(&q->list, &hb_dest->chain);
  171: void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
  178:         WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
  179:         WARN_ON(del_timer(&q->timer) != 0);
  182:         fp = q->fragments;
  183:         nf = q->net;
  [all …]
ip_fragment.c
   69:         struct inet_frag_queue q;
  114: static unsigned int ip4_hashfn(struct inet_frag_queue *q)
  118:         ipq = container_of(q, struct ipq, q);
  122: static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
  127:         qp = container_of(q, struct ipq, q);
  135: static void ip4_frag_init(struct inet_frag_queue *q, void *a)
  137:         struct ipq *qp = container_of(q, struct ipq, q);
  138:         struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
  154: static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
  158:         qp = container_of(q, struct ipq, q);
  [all …]
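
Aside: ip_fragment.c embeds the generic struct inet_frag_queue as member q inside its protocol-specific struct ipq, and every callback recovers the outer struct with container_of(). The pattern in miniature (userspace macro spelled out; illustrative fields):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inet_frag_queue { int last_in; };

struct ipq {
	unsigned int id;           /* protocol-specific state */
	struct inet_frag_queue q;  /* embedded generic part   */
};

/* Generic code hands us &ipq->q; recover the ipq wrapped around it. */
static unsigned int ipq_id(struct inet_frag_queue *q)
{
	struct ipq *qp = container_of(q, struct ipq, q);

	return qp->id;
}

int main(void)
{
	struct ipq frag = { .id = 42, .q = { 0 } };

	printf("%u\n", ipq_id(&frag.q));  /* prints 42 */
	return 0;
}
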
/net/ipv6/
reassembly.c
   97: static unsigned int ip6_hashfn(struct inet_frag_queue *q)
  101:         fq = container_of(q, struct frag_queue, q);
  105: bool ip6_frag_match(struct inet_frag_queue *q, void *a)
  110:         fq = container_of(q, struct frag_queue, q);
  118: void ip6_frag_init(struct inet_frag_queue *q, void *a)
  120:         struct frag_queue *fq = container_of(q, struct frag_queue, q);
  136:         spin_lock(&fq->q.lock);
  138:         if (fq->q.last_in & INET_FRAG_COMPLETE)
  141:         inet_frag_kill(&fq->q, frags);
  152:         if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
  [all …]
/net/ipv6/netfilter/
nf_conntrack_reasm.c
  147: static unsigned int nf_hashfn(struct inet_frag_queue *q)
  151:         nq = container_of(q, struct frag_queue, q);
  166:         fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
  167:         net = container_of(fq->q.net, struct net, nf_frag.frags);
  177:         struct inet_frag_queue *q;
  190:         q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
  192:         if (IS_ERR_OR_NULL(q)) {
  193:                 inet_frag_maybe_warn_overflow(q, pr_fmt());
  196:         return container_of(q, struct frag_queue, q);
  208:         if (fq->q.last_in & INET_FRAG_COMPLETE) {
  [all …]
/net/netfilter/
xt_quota2.c
  163: q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
  174:         e->quota = q->quota;
  179:         strlcpy(e->name, q->name, sizeof(e->name));
  189: q2_get_counter(const struct xt_quota_mtinfo2 *q)
  195:         if (*q->name == '\0')
  196:                 return q2_new_counter(q, true);
  199:         new_e = q2_new_counter(q, false);
  205:         if (strcmp(e->name, q->name) == 0) {
  244:         struct xt_quota_mtinfo2 *q = par->matchinfo;
  246:         pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
  [all …]
xt_quota.c
   28:         struct xt_quota_info *q = (void *)par->matchinfo;
   29:         struct xt_quota_priv *priv = q->master;
   30:         bool ret = q->flags & XT_QUOTA_INVERT;
   47:         struct xt_quota_info *q = par->matchinfo;
   49:         if (q->flags & ~XT_QUOTA_MASK)
   52:         q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
   53:         if (q->master == NULL)
   56:         spin_lock_init(&q->master->lock);
   57:         q->master->quota = q->quota;
   63:         const struct xt_quota_info *q = par->matchinfo;
  [all …]
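
Aside: quota_mt() above seeds the verdict from XT_QUOTA_INVERT and flips it while quota remains, letting one code path serve both "match until exhausted" and its inverse. A compact sketch of that logic (locking and the shared master state omitted):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define XT_QUOTA_INVERT 0x1u

struct quota {
	uint32_t flags;
	uint64_t remaining;  /* bytes left */
};

static bool quota_match(struct quota *q, uint64_t pkt_len)
{
	bool ret = q->flags & XT_QUOTA_INVERT;  /* default verdict */

	if (q->remaining >= pkt_len) {
		q->remaining -= pkt_len;
		ret = !ret;  /* quota still available: flip the default */
	}
	return ret;
}

int main(void)
{
	struct quota q = { 0, 100 };

	assert(quota_match(&q, 60));   /* within quota */
	assert(!quota_match(&q, 60));  /* would exceed: no match */
	assert(q.remaining == 40);     /* oversize packet consumed nothing */
	return 0;
}
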
