/net/xdp/ |
D | xsk_queue.h |
     91  static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)    in xskq_nb_invalid_descs() argument
     93  return q ? q->invalid_descs : 0;    in xskq_nb_invalid_descs()
     96  static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)    in xskq_nb_avail() argument
     98  u32 entries = q->prod_tail - q->cons_tail;    in xskq_nb_avail()
    102  q->prod_tail = READ_ONCE(q->ring->producer);    in xskq_nb_avail()
    103  entries = q->prod_tail - q->cons_tail;    in xskq_nb_avail()
    109  static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)    in xskq_nb_free() argument
    111  u32 free_entries = q->nentries - (producer - q->cons_tail);    in xskq_nb_free()
    117  q->cons_tail = READ_ONCE(q->ring->consumer);    in xskq_nb_free()
    118  return q->nentries - (producer - q->cons_tail);    in xskq_nb_free()
         [all …]
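
The helpers above treat the AF_XDP producer/consumer indices as free-running unsigned counters: available entries are producer - consumer and free slots are nentries - (producer - consumer), both of which stay correct across 32-bit wraparound. A minimal userspace sketch of that arithmetic follows; the struct ring, ring_avail() and ring_free() names are invented for the example, not kernel API.

    /* Wrap-safe occupancy accounting for a single-producer/single-consumer
     * ring, in the style of the xsk_queue.h helpers above. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ring {
            uint32_t nentries;      /* ring size                            */
            uint32_t producer;      /* free-running, wraps around naturally */
            uint32_t consumer;
    };

    /* Entries the consumer may still read: unsigned subtraction is wrap-safe. */
    static uint32_t ring_avail(const struct ring *r)
    {
            return r->producer - r->consumer;
    }

    /* Slots the producer may still fill. */
    static uint32_t ring_free(const struct ring *r)
    {
            return r->nentries - (r->producer - r->consumer);
    }

    int main(void)
    {
            struct ring r = {
                    .nentries = 64,
                    .producer = UINT32_MAX - 2,
                    .consumer = UINT32_MAX - 10,
            };

            /* 8 entries outstanding even though both counters are about to wrap. */
            printf("avail=%" PRIu32 " free=%" PRIu32 "\n",
                   ring_avail(&r), ring_free(&r));
            return 0;
    }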
|
/net/sched/ |
D | sch_choke.c |
     76  static unsigned int choke_len(const struct choke_sched_data *q)    in choke_len() argument
     78  return (q->tail - q->head) & q->tab_mask;    in choke_len()
     82  static int use_ecn(const struct choke_sched_data *q)    in use_ecn() argument
     84  return q->flags & TC_RED_ECN;    in use_ecn()
     88  static int use_harddrop(const struct choke_sched_data *q)    in use_harddrop() argument
     90  return q->flags & TC_RED_HARDDROP;    in use_harddrop()
     94  static void choke_zap_head_holes(struct choke_sched_data *q)    in choke_zap_head_holes() argument
     97  q->head = (q->head + 1) & q->tab_mask;    in choke_zap_head_holes()
     98  if (q->head == q->tail)    in choke_zap_head_holes()
    100  } while (q->tab[q->head] == NULL);    in choke_zap_head_holes()
         [all …]
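
choke keeps its packets in a power-of-two table: occupancy is (tail - head) & tab_mask, and holes left where packets were dropped from the middle are skipped by advancing head, as choke_zap_head_holes() does above. A toy illustration of that indexing with made-up names and a tiny 8-slot table:

    /* Masked circular-table indexing in the style of the excerpt above.
     * Not the qdisc itself; just the (tail - head) & mask trick. */
    #include <stdio.h>

    #define TAB_SIZE 8                      /* must be a power of two */
    #define TAB_MASK (TAB_SIZE - 1)

    static void *tab[TAB_SIZE];
    static unsigned int head, tail;         /* kept in [0, TAB_MASK] */

    static unsigned int ring_len(void)
    {
            return (tail - head) & TAB_MASK;
    }

    static void zap_head_holes(void)
    {
            /* Advance past the consumed head and any slots emptied in the middle. */
            do {
                    head = (head + 1) & TAB_MASK;
                    if (head == tail)
                            break;
            } while (tab[head] == NULL);
    }

    int main(void)
    {
            int a = 1, b = 2, c = 3;

            tab[tail] = &a; tail = (tail + 1) & TAB_MASK;
            tab[tail] = &b; tail = (tail + 1) & TAB_MASK;
            tab[tail] = &c; tail = (tail + 1) & TAB_MASK;

            tab[1] = NULL;                  /* drop the middle entry, leaving a hole */
            zap_head_holes();               /* consume slot 0 and skip the hole      */
            printf("len after zap: %u\n", ring_len());      /* prints 1 */
            return 0;
    }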
|
D | sch_netem.c |
    200  static bool loss_4state(struct netem_sched_data *q)    in loss_4state() argument
    202  struct clgstate *clg = &q->clg;    in loss_4state()
    265  static bool loss_gilb_ell(struct netem_sched_data *q)    in loss_gilb_ell() argument
    267  struct clgstate *clg = &q->clg;    in loss_gilb_ell()
    286  static bool loss_event(struct netem_sched_data *q)    in loss_event() argument
    288  switch (q->loss_model) {    in loss_event()
    291  return q->loss && q->loss >= get_crandom(&q->loss_cor);    in loss_event()
    299  return loss_4state(q);    in loss_event()
    307  return loss_gilb_ell(q);    in loss_event()
    345  static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)    in packet_time_ns() argument
         [all …]
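
loss_event() above picks between a simple random loss, a 4-state model and the Gilbert-Elliott model. As a rough illustration of the latter family, here is a generic two-state (good/bad) loss model; the parameter names and values are invented and do not mirror netem's clgstate fields:

    /* Generic two-state Markov loss model: a "good" and a "bad" state, each
     * with its own per-packet loss probability, plus transition probabilities
     * between them. Hedged sketch, not netem's implementation. */
    #include <stdio.h>
    #include <stdlib.h>

    enum { GOOD, BAD };

    struct ge_state {
            int state;
            double p_good_to_bad;   /* chance of entering the bursty state   */
            double p_bad_to_good;   /* chance of recovering from it          */
            double loss_in_good;    /* per-packet loss probability per state */
            double loss_in_bad;
    };

    static int ge_loss_event(struct ge_state *ge)
    {
            double r = (double)rand() / RAND_MAX;

            /* First decide whether the channel changes state... */
            if (ge->state == GOOD && r < ge->p_good_to_bad)
                    ge->state = BAD;
            else if (ge->state == BAD && r < ge->p_bad_to_good)
                    ge->state = GOOD;

            /* ...then draw the loss with the current state's probability. */
            r = (double)rand() / RAND_MAX;
            return r < (ge->state == GOOD ? ge->loss_in_good : ge->loss_in_bad);
    }

    int main(void)
    {
            struct ge_state ge = { GOOD, 0.01, 0.2, 0.001, 0.3 };
            int lost = 0;

            for (int i = 0; i < 100000; i++)
                    lost += ge_loss_event(&ge);
            printf("lost %d of 100000 packets\n", lost);
            return 0;
    }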
|
D | sch_sfq.c |
    150  static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)    in sfq_dep_head() argument
    153  return &q->slots[val].dep;    in sfq_dep_head()
    154  return &q->dep[val - SFQ_MAX_FLOWS];    in sfq_dep_head()
    157  static unsigned int sfq_hash(const struct sfq_sched_data *q,    in sfq_hash() argument
    160  return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);    in sfq_hash()
    166  struct sfq_sched_data *q = qdisc_priv(sch);    in sfq_classify() local
    173  TC_H_MIN(skb->priority) <= q->divisor)    in sfq_classify()
    176  fl = rcu_dereference_bh(q->filter_list);    in sfq_classify()
    178  return sfq_hash(q, skb) + 1;    in sfq_classify()
    194  if (TC_H_MIN(res.classid) <= q->divisor)    in sfq_classify()
         [all …]
|
D | sch_pie.c |
     94  struct pie_sched_data *q = qdisc_priv(sch);    in drop_early() local
     96  u64 local_prob = q->vars.prob;    in drop_early()
    100  if (q->vars.burst_time > 0)    in drop_early()
    106  if ((q->vars.qdelay < q->params.target / 2) &&    in drop_early()
    107  (q->vars.prob < MAX_PROB / 5))    in drop_early()
    119  if (q->params.bytemode && packet_size <= mtu)    in drop_early()
    122  local_prob = q->vars.prob;    in drop_early()
    125  q->vars.accu_prob = 0;    in drop_early()
    126  q->vars.accu_prob_overflows = 0;    in drop_early()
    129  if (local_prob > MAX_PROB - q->vars.accu_prob)    in drop_early()
         [all …]
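
The excerpt shows PIE's early-drop gate: no drops while a burst allowance remains, no drops while both queue delay and drop probability are low, otherwise a random draw against the current probability. A simplified standalone version of that decision; the fixed-point scale and field names here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PROB UINT32_MAX     /* probability 1.0 in fixed point */

    struct pie_like {
            uint32_t prob;          /* current drop probability  */
            uint64_t qdelay_us;     /* measured queueing delay   */
            uint64_t target_us;     /* target delay              */
            uint64_t burst_left_us; /* remaining burst allowance */
    };

    static bool drop_early(const struct pie_like *p)
    {
            if (p->burst_left_us > 0)
                    return false;

            /* Leave short, lightly-marked queues alone. */
            if (p->qdelay_us < p->target_us / 2 && p->prob < MAX_PROB / 5)
                    return false;

            /* Bernoulli trial against the controller's drop probability. */
            return (double)rand() / RAND_MAX < (double)p->prob / MAX_PROB;
    }

    int main(void)
    {
            struct pie_like p = { MAX_PROB / 10, 30000, 15000, 0 };
            int drops = 0;

            for (int i = 0; i < 100000; i++)
                    drops += drop_early(&p);
            printf("dropped %d of 100000 (expect roughly 10%%)\n", drops);
            return 0;
    }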
|
D | sch_sfb.c |
    123  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)    in increment_one_qlen() argument
    126  struct sfb_bucket *b = &q->bins[slot].bins[0][0];    in increment_one_qlen()
    138  static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)    in increment_qlen() argument
    144  increment_one_qlen(sfbhash, 0, q);    in increment_qlen()
    148  increment_one_qlen(sfbhash, 1, q);    in increment_qlen()
    152  struct sfb_sched_data *q)    in decrement_one_qlen() argument
    155  struct sfb_bucket *b = &q->bins[slot].bins[0][0];    in decrement_one_qlen()
    167  static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)    in decrement_qlen() argument
    173  decrement_one_qlen(sfbhash, 0, q);    in decrement_qlen()
    177  decrement_one_qlen(sfbhash, 1, q);    in decrement_qlen()
         [all …]
|
D | sch_fq_codel.c |
     71  static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,    in fq_codel_hash() argument
     74  return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);    in fq_codel_hash()
     80  struct fq_codel_sched_data *q = qdisc_priv(sch);    in fq_codel_classify() local
     87  TC_H_MIN(skb->priority) <= q->flows_cnt)    in fq_codel_classify()
     90  filter = rcu_dereference_bh(q->filter_list);    in fq_codel_classify()
     92  return fq_codel_hash(q, skb) + 1;    in fq_codel_classify()
    108  if (TC_H_MIN(res.classid) <= q->flows_cnt)    in fq_codel_classify()
    141  struct fq_codel_sched_data *q = qdisc_priv(sch);    in fq_codel_drop() local
    155  for (i = 0; i < q->flows_cnt; i++) {    in fq_codel_drop()
    156  if (q->backlogs[i] > maxbacklog) {    in fq_codel_drop()
         [all …]
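
fq_codel_hash() maps the skb hash onto flows_cnt buckets with reciprocal_scale(), i.e. (hash * n) >> 32 instead of a modulo. A standalone copy of that mapping (the 1024-flow count is just the usual default, picked here for illustration):

    /* reciprocal_scale(): map a 32-bit value onto [0, ep_ro) with a
     * multiply and shift, avoiding a division. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t flows_cnt = 1024;
            uint32_t hashes[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };

            for (unsigned int i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
                    printf("hash %08" PRIx32 " -> flow %" PRIu32 "\n",
                           hashes[i], reciprocal_scale(hashes[i], flows_cnt));
            return 0;
    }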
|
D | sch_skbprio.c |
     40  static u16 calc_new_high_prio(const struct skbprio_sched_data *q)    in calc_new_high_prio() argument
     44  for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {    in calc_new_high_prio()
     45  if (!skb_queue_empty(&q->qdiscs[prio]))    in calc_new_high_prio()
     53  static u16 calc_new_low_prio(const struct skbprio_sched_data *q)    in calc_new_low_prio() argument
     57  for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {    in calc_new_low_prio()
     58  if (!skb_queue_empty(&q->qdiscs[prio]))    in calc_new_low_prio()
     72  struct skbprio_sched_data *q = qdisc_priv(sch);    in skbprio_enqueue() local
     81  qdisc = &q->qdiscs[prio];    in skbprio_enqueue()
     82  if (sch->q.qlen < sch->limit) {    in skbprio_enqueue()
     85  q->qstats[prio].backlog += qdisc_pkt_len(skb);    in skbprio_enqueue()
         [all …]
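
When the highest or lowest occupied band drains, skbprio rescans for the next non-empty one, as the two helpers above do. A toy version using per-band packet counts instead of skb queues (band count and fallbacks chosen for the example):

    #include <stdio.h>

    #define BANDS 64

    static unsigned int qlen[BANDS];
    static int highest_prio, lowest_prio;

    static int calc_new_high_prio(void)
    {
            for (int prio = highest_prio - 1; prio >= lowest_prio; prio--)
                    if (qlen[prio])
                            return prio;
            return 0;               /* everything is empty */
    }

    static int calc_new_low_prio(void)
    {
            for (int prio = lowest_prio + 1; prio <= highest_prio; prio++)
                    if (qlen[prio])
                            return prio;
            return BANDS - 1;       /* everything is empty */
    }

    int main(void)
    {
            qlen[3] = 2;
            qlen[10] = 1;
            qlen[42] = 5;
            lowest_prio = 3;
            highest_prio = 42;

            qlen[42] = 0;           /* the top band just drained */
            highest_prio = calc_new_high_prio();
            printf("new highest occupied band: %d\n", highest_prio);  /* prints 10 */
            printf("lowest occupied band: %d\n", calc_new_low_prio());
            return 0;
    }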
|
D | sch_fq.c |
    156  static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)    in fq_flow_unset_throttled() argument
    158  rb_erase(&f->rate_node, &q->delayed);    in fq_flow_unset_throttled()
    159  q->throttled_flows--;    in fq_flow_unset_throttled()
    160  fq_flow_add_tail(&q->old_flows, f);    in fq_flow_unset_throttled()
    163  static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)    in fq_flow_set_throttled() argument
    165  struct rb_node **p = &q->delayed.rb_node, *parent = NULL;    in fq_flow_set_throttled()
    178  rb_insert_color(&f->rate_node, &q->delayed);    in fq_flow_set_throttled()
    179  q->throttled_flows++;    in fq_flow_set_throttled()
    180  q->stat_throttled++;    in fq_flow_set_throttled()
    183  if (q->time_next_delayed_flow > f->time_next_packet)    in fq_flow_set_throttled()
         [all …]
|
D | sch_cbs.c |
     99  sch->q.qlen++;    in cbs_child_enqueue()
    107  struct cbs_sched_data *q = qdisc_priv(sch);    in cbs_enqueue_offload() local
    108  struct Qdisc *qdisc = q->qdisc;    in cbs_enqueue_offload()
    116  struct cbs_sched_data *q = qdisc_priv(sch);    in cbs_enqueue_soft() local
    117  struct Qdisc *qdisc = q->qdisc;    in cbs_enqueue_soft()
    119  if (sch->q.qlen == 0 && q->credits > 0) {    in cbs_enqueue_soft()
    123  q->credits = 0;    in cbs_enqueue_soft()
    124  q->last = ktime_get_ns();    in cbs_enqueue_soft()
    133  struct cbs_sched_data *q = qdisc_priv(sch);    in cbs_enqueue() local
    135  return q->enqueue(skb, sch, to_free);    in cbs_enqueue()
         [all …]
|
D | sch_red.c |
     47  static inline int red_use_ecn(struct red_sched_data *q)    in red_use_ecn() argument
     49  return q->flags & TC_RED_ECN;    in red_use_ecn()
     52  static inline int red_use_harddrop(struct red_sched_data *q)    in red_use_harddrop() argument
     54  return q->flags & TC_RED_HARDDROP;    in red_use_harddrop()
     60  struct red_sched_data *q = qdisc_priv(sch);    in red_enqueue() local
     61  struct Qdisc *child = q->qdisc;    in red_enqueue()
     65  q->vars.qavg = red_calc_qavg(&q->parms,    in red_enqueue()
     66  &q->vars,    in red_enqueue()
     69  if (red_is_idling(&q->vars))    in red_enqueue()
     70  red_end_of_idle_period(&q->vars);    in red_enqueue()
         [all …]
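
red_calc_qavg() above maintains an exponentially weighted moving average of the backlog, and the drop/mark probability ramps up between two thresholds on that average rather than on the instantaneous backlog. A sketch of the idea in doubles (the kernel does this in fixed point with a Wlog shift, so this is only the shape of the computation, with invented names and thresholds):

    #include <stdio.h>

    struct red_like {
            double qavg;            /* smoothed backlog          */
            double w;               /* EWMA weight, e.g. 1/512   */
            double qth_min;         /* start of the drop ramp    */
            double qth_max;         /* full marking beyond this  */
    };

    static void red_update(struct red_like *r, double backlog)
    {
            /* EWMA: pull the average a small step toward the current backlog. */
            r->qavg += r->w * (backlog - r->qavg);
    }

    static double red_mark_probability(const struct red_like *r, double max_p)
    {
            if (r->qavg < r->qth_min)
                    return 0.0;
            if (r->qavg >= r->qth_max)
                    return 1.0;
            return max_p * (r->qavg - r->qth_min) / (r->qth_max - r->qth_min);
    }

    int main(void)
    {
            struct red_like r = { 0.0, 1.0 / 512, 30000, 90000 };

            /* A sustained 60 kB backlog slowly pulls the average into the ramp. */
            for (int i = 0; i < 5000; i++)
                    red_update(&r, 60000);
            printf("qavg=%.0f p=%.3f\n", r.qavg, red_mark_probability(&r, 0.02));
            return 0;
    }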
|
D | sch_cake.c |
    625  static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,    in cake_hash() argument
    706  if (likely(q->tags[reduced_hash] == flow_hash &&    in cake_hash()
    707  q->flows[reduced_hash].set)) {    in cake_hash()
    708  q->way_directs++;    in cake_hash()
    721  if (q->tags[outer_hash + k] == flow_hash) {    in cake_hash()
    723  q->way_hits++;    in cake_hash()
    725  if (!q->flows[outer_hash + k].set) {    in cake_hash()
    740  if (!q->flows[outer_hash + k].set) {    in cake_hash()
    741  q->way_misses++;    in cake_hash()
    751  q->way_collisions++;    in cake_hash()
         [all …]
|
D | sch_hhf.c |
    182  struct hhf_sched_data *q)    in seek_list() argument
    191  u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;    in seek_list()
    201  q->hh_flows_current_cnt--;    in seek_list()
    213  struct hhf_sched_data *q)    in alloc_new_hh() argument
    221  u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;    in alloc_new_hh()
    228  if (q->hh_flows_current_cnt >= q->hh_flows_limit) {    in alloc_new_hh()
    229  q->hh_flows_overlimit++;    in alloc_new_hh()
    237  q->hh_flows_current_cnt++;    in alloc_new_hh()
    249  struct hhf_sched_data *q = qdisc_priv(sch);    in hhf_classify() local
    259  prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;    in hhf_classify()
         [all …]
|
D | sch_qfq.c |
    208  struct qfq_sched *q = qdisc_priv(sch);    in qfq_find_class() local
    211  clc = qdisc_class_find(&q->clhash, classid);    in qfq_find_class()
    253  static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,    in qfq_init_agg() argument
    257  hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);    in qfq_init_agg()
    263  static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,    in qfq_find_agg() argument
    268  hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)    in qfq_find_agg()
    277  static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,    in qfq_update_agg() argument
    282  if (new_num_classes == q->max_agg_classes)    in qfq_update_agg()
    286  new_num_classes == q->max_agg_classes - 1) /* agg no more full */    in qfq_update_agg()
    287  hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);    in qfq_update_agg()
         [all …]
|
D | sch_multiq.c |
     32  struct multiq_sched_data *q = qdisc_priv(sch);    in multiq_classify() local
     35  struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);    in multiq_classify()
     53  if (band >= q->bands)    in multiq_classify()
     54  return q->queues[0];    in multiq_classify()
     56  return q->queues[band];    in multiq_classify()
     79  sch->q.qlen++;    in multiq_enqueue()
     89  struct multiq_sched_data *q = qdisc_priv(sch);    in multiq_dequeue() local
     94  for (band = 0; band < q->bands; band++) {    in multiq_dequeue()
     96  q->curband++;    in multiq_dequeue()
     97  if (q->curband >= q->bands)    in multiq_dequeue()
         [all …]
|
D | sch_tbf.c |
    146  struct tbf_sched_data *q = qdisc_priv(sch);    in tbf_segment() local
    163  ret = qdisc_enqueue(segs, q->qdisc, to_free);    in tbf_segment()
    172  sch->q.qlen += nb;    in tbf_segment()
    182  struct tbf_sched_data *q = qdisc_priv(sch);    in tbf_enqueue() local
    186  if (qdisc_pkt_len(skb) > q->max_size) {    in tbf_enqueue()
    188  skb_gso_validate_mac_len(skb, q->max_size))    in tbf_enqueue()
    192  ret = qdisc_enqueue(skb, q->qdisc, to_free);    in tbf_enqueue()
    200  sch->q.qlen++;    in tbf_enqueue()
    204  static bool tbf_peak_present(const struct tbf_sched_data *q)    in tbf_peak_present() argument
    206  return q->peak.rate_bytes_ps;    in tbf_peak_present()
         [all …]
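
sch_tbf is a token-bucket shaper: tokens accrue at the configured rate up to a burst cap, and a packet may leave only when enough tokens are available (the excerpt shows the enqueue side; the token accounting happens at dequeue). A plain sketch of a byte-based bucket, not the qdisc's nanosecond fixed-point version; all names and numbers here are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct tbf_like {
            double rate;            /* bytes per second      */
            double burst;           /* bucket depth in bytes */
            double tokens;          /* current fill          */
            double last;            /* last update, seconds  */
    };

    static void tbf_refill(struct tbf_like *t, double now)
    {
            t->tokens += (now - t->last) * t->rate;
            if (t->tokens > t->burst)
                    t->tokens = t->burst;
            t->last = now;
    }

    static bool tbf_try_send(struct tbf_like *t, double now, unsigned int len)
    {
            tbf_refill(t, now);
            if (t->tokens < len)
                    return false;   /* would have to wait to stay within rate */
            t->tokens -= len;
            return true;
    }

    int main(void)
    {
            struct tbf_like t = { 125000.0 /* ~1 Mbit/s */, 10000.0, 10000.0, 0.0 };
            double now = 0.0;
            int sent = 0;

            /* Offer a 1500-byte packet every millisecond for one second. */
            for (int i = 0; i < 1000; i++, now += 0.001)
                    sent += tbf_try_send(&t, now, 1500);
            printf("sent %d of 1000 packets\n", sent);  /* limited by the token budget */
            return 0;
    }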
|
D | sch_plug.c |
     90  struct plug_sched_data *q = qdisc_priv(sch);    in plug_enqueue() local
     92  if (likely(sch->qstats.backlog + skb->len <= q->limit)) {    in plug_enqueue()
     93  if (!q->unplug_indefinite)    in plug_enqueue()
     94  q->pkts_current_epoch++;    in plug_enqueue()
    103  struct plug_sched_data *q = qdisc_priv(sch);    in plug_dequeue() local
    105  if (q->throttled)    in plug_dequeue()
    108  if (!q->unplug_indefinite) {    in plug_dequeue()
    109  if (!q->pkts_to_release) {    in plug_dequeue()
    113  q->throttled = true;    in plug_dequeue()
    116  q->pkts_to_release--;    in plug_dequeue()
         [all …]
|
D | sch_etf.c |
     77  struct etf_sched_data *q = qdisc_priv(sch);    in is_packet_valid() local
     82  if (q->skip_sock_check)    in is_packet_valid()
     94  if (sk->sk_clockid != q->clockid)    in is_packet_valid()
     97  if (sk->sk_txtime_deadline_mode != q->deadline_mode)    in is_packet_valid()
    101  now = q->get_time();    in is_packet_valid()
    102  if (ktime_before(txtime, now) || ktime_before(txtime, q->last))    in is_packet_valid()
    110  struct etf_sched_data *q = qdisc_priv(sch);    in etf_peek_timesortedlist() local
    113  p = rb_first_cached(&q->head);    in etf_peek_timesortedlist()
    122  struct etf_sched_data *q = qdisc_priv(sch);    in reset_watchdog() local
    127  qdisc_watchdog_cancel(&q->watchdog);    in reset_watchdog()
         [all …]
|
D | sch_ingress.c |
     50  struct ingress_sched_data *q = qdisc_priv(sch);    in ingress_tcf_block() local
     52  return q->block;    in ingress_tcf_block()
     64  struct ingress_sched_data *q = qdisc_priv(sch);    in ingress_ingress_block_set() local
     66  q->block_info.block_index = block_index;    in ingress_ingress_block_set()
     71  struct ingress_sched_data *q = qdisc_priv(sch);    in ingress_ingress_block_get() local
     73  return q->block_info.block_index;    in ingress_ingress_block_get()
     79  struct ingress_sched_data *q = qdisc_priv(sch);    in ingress_init() local
     87  mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);    in ingress_init()
     89  q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;    in ingress_init()
     90  q->block_info.chain_head_change = clsact_chain_head_change;    in ingress_init()
         [all …]
|
D | sch_gred.c |
     98  struct gred_sched_data *q = table->tab[i];    in gred_wred_mode_check() local
    101  if (q == NULL)    in gred_wred_mode_check()
    105  if (table->tab[n] && table->tab[n]->prio == q->prio)    in gred_wred_mode_check()
    113  struct gred_sched_data *q,    in gred_backlog() argument
    119  return q->backlog;    in gred_backlog()
    128  struct gred_sched_data *q)    in gred_load_wred_set() argument
    130  q->vars.qavg = table->wred_set.qavg;    in gred_load_wred_set()
    131  q->vars.qidlestart = table->wred_set.qidlestart;    in gred_load_wred_set()
    135  struct gred_sched_data *q)    in gred_store_wred_set() argument
    137  table->wred_set.qavg = q->vars.qavg;    in gred_store_wred_set()
         [all …]
|
D | sch_codel.c |
     72  struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);    in dequeue_func()
     91  struct codel_sched_data *q = qdisc_priv(sch);    in codel_qdisc_dequeue() local
     94  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,    in codel_qdisc_dequeue()
     95  &q->stats, qdisc_pkt_len, codel_get_enqueue_time,    in codel_qdisc_dequeue()
    101  if (q->stats.drop_count && sch->q.qlen) {    in codel_qdisc_dequeue()
    102  qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);    in codel_qdisc_dequeue()
    103  q->stats.drop_count = 0;    in codel_qdisc_dequeue()
    104  q->stats.drop_len = 0;    in codel_qdisc_dequeue()
    114  struct codel_sched_data *q;    in codel_qdisc_enqueue() local
    120  q = qdisc_priv(sch);    in codel_qdisc_enqueue()
         [all …]
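
codel_dequeue() above starts dropping once packets have sat above the target sojourn time for a full interval, and schedules each further drop at interval / sqrt(count), so the gap shrinks while the overload persists. A sketch of just that drop schedule (the control law), with the usual 100 ms interval picked for the example:

    #include <math.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MS(x) ((uint64_t)(x) * 1000000ull)      /* milliseconds to nanoseconds */

    /* Next drop time: t + interval / sqrt(count). */
    static uint64_t control_law(uint64_t t, uint64_t interval, unsigned int count)
    {
            return t + (uint64_t)(interval / sqrt((double)count));
    }

    int main(void)
    {
            uint64_t interval = MS(100);
            uint64_t t = 0;

            /* The spacing between drops shrinks as the drop count grows. */
            for (unsigned int count = 1; count <= 8; count++) {
                    t = control_law(t, interval, count);
                    printf("drop %u at t=%.1f ms\n", count, t / 1e6);
            }
            return 0;
    }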
|
D | sch_htb.c |
    126  struct Qdisc *q;    member
    182  struct htb_sched *q = qdisc_priv(sch);    in htb_find() local
    185  clc = qdisc_class_find(&q->clhash, handle);    in htb_find()
    212  struct htb_sched *q = qdisc_priv(sch);    in htb_classify() local
    231  tcf = rcu_dereference_bh(q->filter_list);    in htb_classify()
    262  cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);    in htb_classify()
    300  static void htb_add_to_wait_tree(struct htb_sched *q,    in htb_add_to_wait_tree() argument
    303  struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;    in htb_add_to_wait_tree()
    305  cl->pq_key = q->now + delay;    in htb_add_to_wait_tree()
    306  if (cl->pq_key == q->now)    in htb_add_to_wait_tree()
         [all …]
|
D | sch_taprio.c |
     98  static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)    in taprio_mono_to_any() argument
    101  enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);    in taprio_mono_to_any()
    111  static ktime_t taprio_get_time(const struct taprio_sched *q)    in taprio_get_time() argument
    113  return taprio_mono_to_any(q, ktime_get());    in taprio_get_time()
    132  static void switch_schedules(struct taprio_sched *q,    in switch_schedules() argument
    136  rcu_assign_pointer(q->oper_sched, *admin);    in switch_schedules()
    137  rcu_assign_pointer(q->admin_sched, NULL);    in switch_schedules()
    180  static int length_to_duration(struct taprio_sched *q, int len)    in length_to_duration() argument
    182  return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);    in length_to_duration()
    201  struct taprio_sched *q = qdisc_priv(sch);    in find_entry_to_transmit() local
         [all …]
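
length_to_duration() above turns a frame length into its wire time via a cached picoseconds-per-byte value. The same conversion as a standalone program; the link speed and frame size are chosen arbitrarily for the example:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 8 bits per byte, 10^12 picoseconds per second. */
    static int64_t picos_per_byte(int64_t link_speed_bps)
    {
            return (8 * 1000000000000ll) / link_speed_bps;
    }

    static int64_t length_to_duration_ns(int64_t ppb, int len)
    {
            return (len * ppb) / 1000;      /* picoseconds -> nanoseconds */
    }

    int main(void)
    {
            int64_t ppb = picos_per_byte(1000000000ll);     /* 1 Gb/s: 8000 ps/byte */

            printf("1500-byte frame: %" PRId64 " ns\n",
                   length_to_duration_ns(ppb, 1500));       /* 12000 ns */
            return 0;
    }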
|
D | sch_prio.c |
     33  struct prio_sched_data *q = qdisc_priv(sch);    in prio_classify() local
     41  fl = rcu_dereference_bh(q->filter_list);    in prio_classify()
     57  return q->queues[q->prio2band[band & TC_PRIO_MAX]];    in prio_classify()
     62  if (band >= q->bands)    in prio_classify()
     63  return q->queues[q->prio2band[0]];    in prio_classify()
     65  return q->queues[band];    in prio_classify()
     89  sch->q.qlen++;    in prio_enqueue()
     99  struct prio_sched_data *q = qdisc_priv(sch);    in prio_peek() local
    102  for (prio = 0; prio < q->bands; prio++) {    in prio_peek()
    103  struct Qdisc *qdisc = q->queues[prio];    in prio_peek()
         [all …]
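
sch_prio is strict priority: prio_peek()/prio_dequeue() walk the bands in order and serve the first non-empty one, so lower bands can be starved. A toy model of that band walk using per-band counters instead of child qdiscs:

    #include <stdio.h>

    #define BANDS 3

    static int backlog[BANDS];

    /* Returns the band to dequeue from, or -1 if all bands are empty. */
    static int prio_pick_band(void)
    {
            for (int band = 0; band < BANDS; band++)
                    if (backlog[band] > 0)
                            return band;
            return -1;
    }

    int main(void)
    {
            int band;

            backlog[1] = 2;
            backlog[2] = 5;

            /* Band 1 drains completely before band 2 is ever served. */
            while ((band = prio_pick_band()) >= 0) {
                    backlog[band]--;
                    printf("dequeued from band %d\n", band);
            }
            return 0;
    }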
|
/net/ipv4/ |
D | inet_fragment.c |
     54  static void fragrun_append_to_last(struct inet_frag_queue *q,    in fragrun_append_to_last() argument
     59  FRAG_CB(q->last_run_head)->frag_run_len += skb->len;    in fragrun_append_to_last()
     60  FRAG_CB(q->fragments_tail)->next_frag = skb;    in fragrun_append_to_last()
     61  q->fragments_tail = skb;    in fragrun_append_to_last()
     65  static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)    in fragrun_create() argument
     70  if (q->last_run_head)    in fragrun_create()
     71  rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,    in fragrun_create()
     72  &q->last_run_head->rbnode.rb_right);    in fragrun_create()
     74  rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);    in fragrun_create()
     75  rb_insert_color(&skb->rbnode, &q->rb_fragments);    in fragrun_create()
         [all …]
|