/net/xdp/ |
D | xsk_queue.h |
     91  static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)   in xskq_nb_invalid_descs()
     96  static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)   in xskq_nb_avail()
    109  static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)   in xskq_nb_free()
    121  static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)   in xskq_has_addrs()
    148  static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)   in xskq_is_valid_addr()
    158  static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr,   in xskq_is_valid_addr_unaligned()
    174  static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr,   in xskq_validate_addr()
    201  static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr,   in xskq_peek_addr()
    216  static inline void xskq_discard_addr(struct xsk_queue *q)   in xskq_discard_addr()
    221  static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)   in xskq_produce_addr()
    [all …]
|
D | xsk_queue.c |
     12  void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)   in xskq_set_umem()
     21  static u32 xskq_umem_get_ring_size(struct xsk_queue *q)   in xskq_umem_get_ring_size()
     26  static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)   in xskq_rxtx_get_ring_size()
     33  struct xsk_queue *q;   in xskq_create()   local
     59  void xskq_destroy(struct xsk_queue *q)   in xskq_destroy()
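Note: the xskq_nb_avail()/xskq_nb_free() helpers listed above work on a single-producer/single-consumer ring whose free-running 32-bit producer and consumer counters are compared modulo the ring size. The sketch below is a minimal userspace illustration of that arithmetic only; the struct and function names (xsk_ring, nb_avail, nb_free) are placeholders, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative SPSC ring bookkeeping; not the kernel's struct xsk_queue. */
struct xsk_ring {
        uint32_t nentries;   /* ring size, a power of two */
        uint32_t producer;   /* free-running producer counter */
        uint32_t consumer;   /* free-running consumer counter */
};

/* Entries ready to be consumed: producer - consumer, capped at dcnt. */
static uint32_t nb_avail(const struct xsk_ring *q, uint32_t dcnt)
{
        uint32_t entries = q->producer - q->consumer; /* wraps correctly in u32 */

        return entries < dcnt ? entries : dcnt;
}

/* Slots still free for the producer: ring size minus outstanding entries. */
static uint32_t nb_free(const struct xsk_ring *q, uint32_t dcnt)
{
        uint32_t free_entries = q->nentries - (q->producer - q->consumer);

        return free_entries < dcnt ? free_entries : dcnt;
}

int main(void)
{
        struct xsk_ring q = { .nentries = 8, .producer = 10, .consumer = 6 };

        printf("avail=%u free=%u\n", nb_avail(&q, 8), nb_free(&q, 8));
        return 0;
}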
|
/net/sched/ |
D | sch_red.c |
     47  static inline int red_use_ecn(struct red_sched_data *q)   in red_use_ecn()
     52  static inline int red_use_harddrop(struct red_sched_data *q)   in red_use_harddrop()
     60  struct red_sched_data *q = qdisc_priv(sch);   in red_enqueue()   local
    115  struct red_sched_data *q = qdisc_priv(sch);   in red_dequeue()   local
    132  struct red_sched_data *q = qdisc_priv(sch);   in red_peek()   local
    140  struct red_sched_data *q = qdisc_priv(sch);   in red_reset()   local
    150  struct red_sched_data *q = qdisc_priv(sch);   in red_offload()   local
    178  struct red_sched_data *q = qdisc_priv(sch);   in red_destroy()   local
    195  struct red_sched_data *q = qdisc_priv(sch);   in red_change()   local
    263  struct red_sched_data *q = from_timer(q, t, adapt_timer);   in red_adaptative_timer()   local
    [all …]
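Note: almost every local `q` under /net/sched/ follows the same pattern: a qdisc callback receives struct Qdisc *sch and fetches its scheduler-private state with q = qdisc_priv(sch), which hands back the memory placed right after the Qdisc header. The standalone sketch below shows that layout trick only; the names (fake_qdisc, red_like_priv) and the plain sizeof-based offset are illustrative assumptions, not the kernel's exact definition.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct Qdisc and one scheduler's private data. */
struct fake_qdisc {
        const char *name;
        /* the kernel appends the per-qdisc private area after this header */
};

struct red_like_priv {
        unsigned int limit;
        unsigned int flags;
};

/* Illustrative equivalent of qdisc_priv(): private data follows the header. */
static void *fake_qdisc_priv(struct fake_qdisc *sch)
{
        return (char *)sch + sizeof(struct fake_qdisc);
}

int main(void)
{
        struct fake_qdisc *sch = malloc(sizeof(*sch) + sizeof(struct red_like_priv));
        struct red_like_priv *q;

        sch->name = "red-like";
        q = fake_qdisc_priv(sch);     /* same shape as: q = qdisc_priv(sch); */
        q->limit = 1000;
        q->flags = 0;

        printf("%s: limit=%u\n", sch->name, q->limit);
        free(sch);
        return 0;
}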
|
D | sch_multiq.c |
     32  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_classify()   local
     89  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_dequeue()   local
    120  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_peek()   local
    151  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_reset()   local
    163  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_destroy()   local
    175  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_tune()   local
    239  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_init()   local
    264  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_dump()   local
    284  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_graft()   local
    297  struct multiq_sched_data *q = qdisc_priv(sch);   in multiq_leaf()   local
    [all …]
|
D | sch_prio.c |
     33  struct prio_sched_data *q = qdisc_priv(sch);   in prio_classify()   local
     99  struct prio_sched_data *q = qdisc_priv(sch);   in prio_peek()   local
    113  struct prio_sched_data *q = qdisc_priv(sch);   in prio_dequeue()   local
    134  struct prio_sched_data *q = qdisc_priv(sch);   in prio_reset()   local
    170  struct prio_sched_data *q = qdisc_priv(sch);   in prio_destroy()   local
    181  struct prio_sched_data *q = qdisc_priv(sch);   in prio_tune()   local
    234  struct prio_sched_data *q = qdisc_priv(sch);   in prio_init()   local
    266  struct prio_sched_data *q = qdisc_priv(sch);   in prio_dump()   local
    291  struct prio_sched_data *q = qdisc_priv(sch);   in prio_graft()   local
    321  struct prio_sched_data *q = qdisc_priv(sch);   in prio_leaf()   local
    [all …]
|
D | sch_choke.c |
     76  static unsigned int choke_len(const struct choke_sched_data *q)   in choke_len()
     82  static int use_ecn(const struct choke_sched_data *q)   in use_ecn()
     88  static int use_harddrop(const struct choke_sched_data *q)   in use_harddrop()
     94  static void choke_zap_head_holes(struct choke_sched_data *q)   in choke_zap_head_holes()
    104  static void choke_zap_tail_holes(struct choke_sched_data *q)   in choke_zap_tail_holes()
    117  struct choke_sched_data *q = qdisc_priv(sch);   in choke_drop_by_idx()   local
    186  static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,   in choke_peek_random()
    206  static bool choke_match_random(const struct choke_sched_data *q,   in choke_match_random()
    222  struct choke_sched_data *q = qdisc_priv(sch);   in choke_enqueue()   local
    292  struct choke_sched_data *q = qdisc_priv(sch);   in choke_dequeue()   local
    [all …]
|
D | sch_etf.c |
     77  struct etf_sched_data *q = qdisc_priv(sch);   in is_packet_valid()   local
    110  struct etf_sched_data *q = qdisc_priv(sch);   in etf_peek_timesortedlist()   local
    122  struct etf_sched_data *q = qdisc_priv(sch);   in reset_watchdog()   local
    164  struct etf_sched_data *q = qdisc_priv(sch);   in etf_enqueue_timesortedlist()   local
    202  struct etf_sched_data *q = qdisc_priv(sch);   in timesortedlist_drop()   local
    232  struct etf_sched_data *q = qdisc_priv(sch);   in timesortedlist_remove()   local
    254  struct etf_sched_data *q = qdisc_priv(sch);   in etf_dequeue_timesortedlist()   local
    296  struct etf_sched_data *q)   in etf_disable_offload()
    318  static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,   in etf_enable_offload()
    348  struct etf_sched_data *q = qdisc_priv(sch);   in etf_init()   local
    [all …]
|
D | sch_ingress.c |
     50  struct ingress_sched_data *q = qdisc_priv(sch);   in ingress_tcf_block()   local
     64  struct ingress_sched_data *q = qdisc_priv(sch);   in ingress_ingress_block_set()   local
     71  struct ingress_sched_data *q = qdisc_priv(sch);   in ingress_ingress_block_get()   local
     79  struct ingress_sched_data *q = qdisc_priv(sch);   in ingress_init()   local
     95  struct ingress_sched_data *q = qdisc_priv(sch);   in ingress_destroy()   local
    168  struct clsact_sched_data *q = qdisc_priv(sch);   in clsact_tcf_block()   local
    182  struct clsact_sched_data *q = qdisc_priv(sch);   in clsact_ingress_block_set()   local
    189  struct clsact_sched_data *q = qdisc_priv(sch);   in clsact_egress_block_set()   local
    196  struct clsact_sched_data *q = qdisc_priv(sch);   in clsact_ingress_block_get()   local
    203  struct clsact_sched_data *q = qdisc_priv(sch);   in clsact_egress_block_get()   local
    [all …]
|
D | sch_cbs.c |
    107  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_enqueue_offload()   local
    116  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_enqueue_soft()   local
    133  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_enqueue()   local
    177  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_dequeue_soft()   local
    222  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_dequeue_offload()   local
    230  struct cbs_sched_data *q = qdisc_priv(sch);   in cbs_dequeue()   local
    240  struct cbs_sched_data *q)   in cbs_disable_offload()
    265  static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,   in cbs_enable_offload()
    298  static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)   in cbs_set_port_rate()
    325  struct cbs_sched_data *q;   in cbs_dev_notifier()   local
    [all …]
|
D | sch_cbq.c |
    103  struct Qdisc *q;	/* Elementary queueing discipline */   member
    166  cbq_class_lookup(struct cbq_sched_data *q, u32 classid)   in cbq_class_lookup()
    207  struct cbq_sched_data *q = qdisc_priv(sch);   in cbq_classify()   local
    293  struct cbq_sched_data *q = qdisc_priv(cl->qdisc);   in cbq_activate_class()   local
    317  struct cbq_sched_data *q = qdisc_priv(this->qdisc);   in cbq_deactivate_class()   local
    342  cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)   in cbq_mark_toplevel()
    362  struct cbq_sched_data *q = qdisc_priv(sch);   in cbq_enqueue()   local
    396  struct cbq_sched_data *q = qdisc_priv(cl->qdisc);   in cbq_overlimit()   local
    444  static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,   in cbq_undelay_prio()
    481  struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,   in cbq_undelay()   local
    [all …]
|
D | sch_sfb.c |
    123  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)   in increment_one_qlen()
    138  static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)   in increment_qlen()
    152  struct sfb_sched_data *q)   in decrement_one_qlen()
    167  static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)   in decrement_qlen()
    180  static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)   in decrement_prob()
    185  static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)   in increment_prob()
    190  static void sfb_zero_all_buckets(struct sfb_sched_data *q)   in sfb_zero_all_buckets()
    198  static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)   in sfb_compute_qlen()
    218  static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)   in sfb_init_perturbation()
    224  static void sfb_swap_slot(struct sfb_sched_data *q)   in sfb_swap_slot()
    [all …]
|
D | sch_sfq.c |
    150  static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)   in sfq_dep_head()
    157  static unsigned int sfq_hash(const struct sfq_sched_data *q,   in sfq_hash()
    166  struct sfq_sched_data *q = qdisc_priv(sch);   in sfq_classify()   local
    203  static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)   in sfq_link()
    219  #define sfq_unlink(q, x, n, p) \   argument
    228  static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)   in sfq_dec()
    241  static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)   in sfq_inc()
    295  struct sfq_sched_data *q = qdisc_priv(sch);   in sfq_drop()   local
    329  static int sfq_prob_mark(const struct sfq_sched_data *q)   in sfq_prob_mark()
    335  static int sfq_hard_mark(const struct sfq_sched_data *q)   in sfq_hard_mark()
    [all …]
|
D | sch_qfq.c |
    208  struct qfq_sched *q = qdisc_priv(sch);   in qfq_find_class()   local
    253  static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,   in qfq_init_agg()
    263  static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,   in qfq_find_agg()
    277  static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,   in qfq_update_agg()
    311  static void qfq_add_to_agg(struct qfq_sched *q,   in qfq_add_to_agg()
    328  static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)   in qfq_destroy_agg()
    341  static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)   in qfq_deactivate_class()
    352  static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)   in qfq_rm_from_agg()
    365  static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)   in qfq_deact_rm_from_agg()
    377  struct qfq_sched *q = qdisc_priv(sch);   in qfq_change_agg()   local
    [all …]
|
D | sch_tbf.c |
    146  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_segment()   local
    182  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_enqueue()   local
    204  static bool tbf_peak_present(const struct tbf_sched_data *q)   in tbf_peak_present()
    211  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_dequeue()   local
    271  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_reset()   local
    296  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_change()   local
    419  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_init()   local
    434  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_destroy()   local
    442  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_dump()   local
    481  struct tbf_sched_data *q = qdisc_priv(sch);   in tbf_dump_class()   local
    [all …]
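Note: the tbf_* callbacks above implement a token bucket filter: credit accumulates with elapsed time up to a burst ceiling, and a packet is released only if enough credit covers its length. The sketch below is a simplified byte-based version of that accounting (sch_tbf itself tracks tokens as time); the names and parameter values are illustrative placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative token bucket; tokens counted in bytes for clarity. */
struct tbucket {
        uint64_t rate_bps;     /* refill rate, bytes per second */
        uint64_t burst_bytes;  /* bucket depth */
        double   tokens;       /* current credit, in bytes */
        uint64_t last_ns;      /* timestamp of the last refill */
};

static bool tbucket_send(struct tbucket *tb, uint64_t now_ns, uint32_t pkt_len)
{
        double elapsed = (double)(now_ns - tb->last_ns) / 1e9;

        tb->tokens += elapsed * (double)tb->rate_bps;   /* refill for elapsed time */
        if (tb->tokens > (double)tb->burst_bytes)
                tb->tokens = (double)tb->burst_bytes;   /* cap at the burst size */
        tb->last_ns = now_ns;

        if (tb->tokens < (double)pkt_len)
                return false;          /* not enough credit: hold the packet */

        tb->tokens -= (double)pkt_len; /* charge the packet and let it go */
        return true;
}

int main(void)
{
        struct tbucket tb = { .rate_bps = 125000, .burst_bytes = 3000,
                              .tokens = 3000, .last_ns = 0 };

        printf("pkt1: %d\n", tbucket_send(&tb, 0, 1500));        /* sent */
        printf("pkt2: %d\n", tbucket_send(&tb, 0, 1500));        /* sent */
        printf("pkt3: %d\n", tbucket_send(&tb, 0, 1500));        /* held */
        printf("pkt4: %d\n", tbucket_send(&tb, 12000000, 1500)); /* refilled, sent */
        return 0;
}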
|
D | sch_netem.c |
    200  static bool loss_4state(struct netem_sched_data *q)   in loss_4state()
    265  static bool loss_gilb_ell(struct netem_sched_data *q)   in loss_gilb_ell()
    286  static bool loss_event(struct netem_sched_data *q)   in loss_event()
    345  static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)   in packet_time_ns()
    362  struct netem_sched_data *q = qdisc_priv(sch);   in tfifo_reset()   local
    380  struct netem_sched_data *q = qdisc_priv(sch);   in tfifo_enqueue()   local
    437  struct netem_sched_data *q = qdisc_priv(sch);   in netem_enqueue()   local
    629  static void get_slot_next(struct netem_sched_data *q, u64 now)   in get_slot_next()
    648  static struct sk_buff *netem_peek(struct netem_sched_data *q)   in netem_peek()
    665  static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)   in netem_erase_head()
    [all …]
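Note: loss_gilb_ell() above is named after the classic Gilbert-Elliott channel model: a two-state Markov chain with a "good" and a "bad" state, each with its own transition probability and its own per-packet loss probability. The sketch below shows the general model rather than netem's exact field semantics or random number generator; the parameter values and names are placeholders.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Two-state Gilbert-Elliott loss model (illustrative parameters). */
struct gilb_ell {
        bool   bad;   /* current state: good (false) or bad (true) */
        double p;     /* P(good -> bad) per packet */
        double r;     /* P(bad -> good) per packet */
        double h;     /* loss probability while in the good state */
        double k;     /* loss probability while in the bad state */
};

static bool ge_lose_packet(struct gilb_ell *ge)
{
        double u = (double)rand() / RAND_MAX;

        /* first decide whether the state flips for this packet */
        if (!ge->bad && u < ge->p)
                ge->bad = true;
        else if (ge->bad && u < ge->r)
                ge->bad = false;

        /* then drop with the current state's own loss probability */
        u = (double)rand() / RAND_MAX;
        return u < (ge->bad ? ge->k : ge->h);
}

int main(void)
{
        struct gilb_ell ge = { .bad = false, .p = 0.05, .r = 0.3,
                               .h = 0.01, .k = 0.7 };
        int lost = 0;

        srand(1);
        for (int i = 0; i < 100000; i++)
                lost += ge_lose_packet(&ge);
        printf("dropped %d of 100000 packets\n", lost);
        return 0;
}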
|
D | sch_fq_codel.c |
     71  static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,   in fq_codel_hash()
     80  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_classify()   local
    141  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_drop()   local
    188  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_enqueue()   local
    259  struct fq_codel_sched_data *q = qdisc_priv(sch);   in dequeue_func()   local
    284  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_dequeue()   local
    338  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_reset()   local
    371  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_change()   local
    443  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_destroy()   local
    453  struct fq_codel_sched_data *q = qdisc_priv(sch);   in fq_codel_init()   local
    [all …]
|
D | sch_taprio.c |
     96  static ktime_t taprio_get_time(struct taprio_sched *q)   in taprio_get_time()
    126  static void switch_schedules(struct taprio_sched *q,   in switch_schedules()
    174  static int length_to_duration(struct taprio_sched *q, int len)   in length_to_duration()
    195  struct taprio_sched *q = qdisc_priv(sch);   in find_entry_to_transmit()   local
    262  struct taprio_sched *q = qdisc_priv(sch);   in is_valid_interval()   local
    292  static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)   in get_tcp_tstamp()
    344  struct taprio_sched *q = qdisc_priv(sch);   in get_packet_txtime()   local
    415  struct taprio_sched *q = qdisc_priv(sch);   in taprio_enqueue()   local
    442  struct taprio_sched *q = qdisc_priv(sch);   in taprio_peek_soft()   local
    486  struct taprio_sched *q = qdisc_priv(sch);   in taprio_peek_offload()   local
    [all …]
|
D | sch_codel.c |
     91  struct codel_sched_data *q = qdisc_priv(sch);   in codel_qdisc_dequeue()   local
    114  struct codel_sched_data *q;   in codel_qdisc_enqueue()   local
    136  struct codel_sched_data *q = qdisc_priv(sch);   in codel_change()   local
    192  struct codel_sched_data *q = qdisc_priv(sch);   in codel_init()   local
    218  struct codel_sched_data *q = qdisc_priv(sch);   in codel_dump()   local
    247  const struct codel_sched_data *q = qdisc_priv(sch);   in codel_dump_stats()   local
    273  struct codel_sched_data *q = qdisc_priv(sch);   in codel_reset()   local
|
D | sch_skbprio.c |
     40  static u16 calc_new_high_prio(const struct skbprio_sched_data *q)   in calc_new_high_prio()
     53  static u16 calc_new_low_prio(const struct skbprio_sched_data *q)   in calc_new_low_prio()
     72  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_enqueue()   local
    141  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_dequeue()   local
    179  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_init()   local
    210  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_reset()   local
    226  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_destroy()   local
    253  struct skbprio_sched_data *q = qdisc_priv(sch);   in skbprio_dump_class_stats()   local
|
D | sch_generic.c |
     51  static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)   in __skb_dequeue_bad_txq()
     86  static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)   in qdisc_dequeue_skb_bad_txq()
     96  static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,   in qdisc_enqueue_skb_bad_txq()
    120  static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)   in dev_requeue_skb()
    152  static void try_bulk_dequeue_skb(struct Qdisc *q,   in try_bulk_dequeue_skb()
    176  static void try_bulk_dequeue_skb_slow(struct Qdisc *q,   in try_bulk_dequeue_skb_slow()
    202  static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,   in dequeue_skb()
    285  bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,   in sch_direct_xmit()
    357  static inline bool qdisc_restart(struct Qdisc *q, int *packets)   in qdisc_restart()
    379  void __qdisc_run(struct Qdisc *q)   in __qdisc_run()
    [all …]
|
D | sch_gred.c |
     98  struct gred_sched_data *q = table->tab[i];   in gred_wred_mode_check()   local
    113  struct gred_sched_data *q,   in gred_backlog()
    128  struct gred_sched_data *q)   in gred_load_wred_set()
    135  struct gred_sched_data *q)   in gred_store_wred_set()
    141  static int gred_use_ecn(struct gred_sched_data *q)   in gred_use_ecn()
    146  static int gred_use_harddrop(struct gred_sched_data *q)   in gred_use_harddrop()
    167  struct gred_sched_data *q = NULL;   in gred_enqueue()   local
    268  struct gred_sched_data *q;   in gred_dequeue()   local
    300  struct gred_sched_data *q = t->tab[i];   in gred_reset()   local
    332  struct gred_sched_data *q = table->tab[i];   in gred_offload()   local
    [all …]
|
D | sch_pie.c |
     94  struct pie_sched_data *q = qdisc_priv(sch);   in drop_early()   local
    154  struct pie_sched_data *q = qdisc_priv(sch);   in pie_qdisc_enqueue()   local
    202  struct pie_sched_data *q = qdisc_priv(sch);   in pie_change()   local
    267  struct pie_sched_data *q = qdisc_priv(sch);   in pie_process_dequeue()   local
    332  struct pie_sched_data *q = qdisc_priv(sch);   in calculate_probability()   local
    445  struct pie_sched_data *q = from_timer(q, t, adapt_timer);   in pie_timer()   local
    461  struct pie_sched_data *q = qdisc_priv(sch);   in pie_init()   local
    483  struct pie_sched_data *q = qdisc_priv(sch);   in pie_dump()   local
    512  struct pie_sched_data *q = qdisc_priv(sch);   in pie_dump_stats()   local
    543  struct pie_sched_data *q = qdisc_priv(sch);   in pie_reset()   local
    [all …]
|
D | sch_htb.c |
    126  struct Qdisc *q;   member
    182  struct htb_sched *q = qdisc_priv(sch);   in htb_find()   local
    212  struct htb_sched *q = qdisc_priv(sch);   in htb_classify()   local
    300  static void htb_add_to_wait_tree(struct htb_sched *q,   in htb_add_to_wait_tree()
    343  static inline void htb_add_class_to_row(struct htb_sched *q,   in htb_add_class_to_row()
    372  static inline void htb_remove_class_from_row(struct htb_sched *q,   in htb_remove_class_from_row()
    400  static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)   in htb_activate_prios()
    435  static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)   in htb_deactivate_prios()
    526  htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)   in htb_change_class_mode()
    555  static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)   in htb_activate()
    [all …]
|
/net/sctp/ |
D | outqueue.c |
     58  static inline void sctp_outq_head_data(struct sctp_outq *q,   in sctp_outq_head_data()
     73  static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)   in sctp_outq_dequeue_data()
     79  static inline void sctp_outq_tail_data(struct sctp_outq *q,   in sctp_outq_tail_data()
    190  void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)   in sctp_outq_init()
    205  static void __sctp_outq_teardown(struct sctp_outq *q)   in __sctp_outq_teardown()
    266  void sctp_outq_teardown(struct sctp_outq *q)   in sctp_outq_teardown()
    273  void sctp_outq_free(struct sctp_outq *q)   in sctp_outq_free()
    280  void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)   in sctp_outq_tail()
    384  struct sctp_outq *q = &asoc->outqueue;   in sctp_prsctp_prune_unsent()   local
    446  void sctp_retransmit_mark(struct sctp_outq *q,   in sctp_retransmit_mark()
    [all …]
|
/net/ipv4/ |
D | inet_fragment.c |
     54  static void fragrun_append_to_last(struct inet_frag_queue *q,   in fragrun_append_to_last()
     65  static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)   in fragrun_create()
    223  struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,   in inet_frag_destroy_rcu()   local
    254  void inet_frag_destroy(struct inet_frag_queue *q)   in inet_frag_destroy()
    279  struct inet_frag_queue *q;   in inet_frag_alloc()   local
    301  struct inet_frag_queue *q;   in inet_frag_create()   local
    344  int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,   in inet_frag_queue_insert()
    408  void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,   in inet_frag_reasm_prepare()
    477  void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,   in inet_frag_reasm_finish()
    545  struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)   in inet_frag_pull_head()
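Note: the local `q` in inet_frag_destroy_rcu() above is recovered from an embedded rcu_head with container_of(), i.e. by subtracting the member's offset from the member's address. The standalone sketch below illustrates only that idiom; the structs here (rcu_head_stub, frag_queue_stub) are stand-ins, not the real kernel types.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub {
        void (*func)(struct rcu_head_stub *head);
};

/* Stand-in for struct inet_frag_queue with an embedded RCU head. */
struct frag_queue_stub {
        int id;
        struct rcu_head_stub rcu;
};

/* The callback receives only the rcu head, like inet_frag_destroy_rcu() does. */
static void destroy_rcu(struct rcu_head_stub *head)
{
        struct frag_queue_stub *q = container_of(head, struct frag_queue_stub, rcu);

        printf("destroying queue %d\n", q->id);
}

int main(void)
{
        struct frag_queue_stub fq = { .id = 42 };

        destroy_rcu(&fq.rcu);
        return 0;
}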
|