/net/sched/
sch_sfq.c
  sfq_link():
    226  struct sfq_slot *slot = &q->slots[x];  (local)
    227  int qlen = slot->qlen;
    232  slot->dep.next = n;
    233  slot->dep.prev = p;
  slot_dequeue_tail():
    275  static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)  (argument)
    277  struct sk_buff *skb = slot->skblist_prev;
    279  slot->skblist_prev = skb->prev;
    280  skb->prev->next = (struct sk_buff *)slot;
  slot_dequeue_head():
    286  static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)  (argument)
    288  struct sk_buff *skb = slot->skblist_next;
  (additional matches omitted)
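The slot_dequeue_tail()/slot_dequeue_head() fragments rely on the slot's first two fields mirroring sk_buff's next/prev, so the slot itself acts as the list sentinel and can be cast to struct sk_buff *. A minimal userspace sketch of that layout trick, with illustrative types rather than the kernel's (the kernel builds with -fno-strict-aliasing, which this kind of cast leans on):

/* Illustrative types only: struct slot stands in for sfq_slot, struct pkt
 * for sk_buff.  The cast works because slot's first two fields alias the
 * node's next/prev, exactly the layout assumption made in sch_sfq.c. */
#include <stdio.h>

struct pkt {
    struct pkt *next;
    struct pkt *prev;
    int id;
};

struct slot {
    struct pkt *skblist_next;   /* must stay first: aliases pkt->next  */
    struct pkt *skblist_prev;   /* must stay second: aliases pkt->prev */
};

static void slot_init(struct slot *s)
{
    s->skblist_next = s->skblist_prev = (struct pkt *)s;   /* empty list */
}

static void slot_queue_add(struct slot *s, struct pkt *p)   /* enqueue at tail */
{
    p->prev = s->skblist_prev;
    p->next = (struct pkt *)s;
    s->skblist_prev->next = p;
    s->skblist_prev = p;
}

static struct pkt *slot_dequeue_head(struct slot *s)
{
    struct pkt *p = s->skblist_next;

    s->skblist_next = p->next;
    p->next->prev = (struct pkt *)s;   /* writes the sentinel's prev field */
    return p;
}

int main(void)
{
    struct slot s;
    struct pkt a = { .id = 1 }, b = { .id = 2 };
    struct pkt *first, *second;

    slot_init(&s);
    slot_queue_add(&s, &a);
    slot_queue_add(&s, &b);
    first = slot_dequeue_head(&s);
    second = slot_dequeue_head(&s);
    printf("%d %d\n", first->id, second->id);   /* prints: 1 2 */
    return 0;
}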
sch_sfb.c
     72  u8 slot; /* current active bins (0 or 1) */  (struct member)
  sfb_hash():
    105  static u32 sfb_hash(const struct sk_buff *skb, u32 slot)  (argument)
    107  return sfb_skb_cb(skb)->hashes[slot];
  increment_one_qlen():
    126  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)  (argument)
    129  struct sfb_bucket *b = &q->bins[slot].bins[0][0];
  decrement_one_qlen():
    154  static void decrement_one_qlen(u32 sfbhash, u32 slot,  (argument)
    158  struct sfb_bucket *b = &q->bins[slot].bins[0][0];
  sfb_compute_qlen():
    205  const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
  sfb_init_perturbation():
    221  static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)  (argument)
    223  q->bins[slot].perturbation = net_random();
  (additional matches omitted)
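The sch_sfb.c matches hint at the double-buffered design: two bin sets ("slots" 0 and 1), each with its own hash perturbation, so one set can be repopulated under a fresh hash while the other stays active. A heavily reduced, hypothetical sketch of that layout; the real sfb_sched_data also keeps per-bucket marking probabilities, and the per-level hashing differs in detail:

#include <stdio.h>
#include <stdlib.h>

#define LEVELS 2
#define BINS   16

struct bucket { unsigned int qlen; };

struct sfb {
    unsigned int slot;                     /* active bin set: 0 or 1    */
    struct {
        unsigned int perturbation;         /* mixed into the flow hash  */
        struct bucket bins[LEVELS][BINS];
    } bins[2];
};

static void init_perturbation(unsigned int slot, struct sfb *q)
{
    q->bins[slot].perturbation = (unsigned int)rand();   /* stand-in RNG */
}

static void increment_one_qlen(unsigned int flowhash, unsigned int slot,
                               struct sfb *q)
{
    unsigned int h = flowhash ^ q->bins[slot].perturbation;
    unsigned int i;

    /* One bucket per level, indexed by a different nibble of the hash
     * (the kernel walks a flat bucket pointer instead). */
    for (i = 0; i < LEVELS; i++)
        q->bins[slot].bins[i][(h >> (4 * i)) % BINS].qlen++;
}

int main(void)
{
    static struct sfb q;

    init_perturbation(0, &q);
    init_perturbation(1, &q);
    increment_one_qlen(0xabcd, q.slot, &q);   /* account one packet */
    printf("active bin set: %u\n", q.slot);
    return 0;
}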
sch_api.c
  __qdisc_calculate_pkt_len():
    442  int pkt_len, slot;  (local)
    448  slot = pkt_len + stab->szopts.cell_align;
    449  if (unlikely(slot < 0))
    450  slot = 0;
    452  slot >>= stab->szopts.cell_log;
    453  if (likely(slot < stab->szopts.tsize))
    454  pkt_len = stab->data[slot];
    457  (slot / stab->szopts.tsize) +
    458  stab->data[slot % stab->szopts.tsize];
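Taken together, the __qdisc_calculate_pkt_len() fragments spell out the size-table lookup: align the length, shift by cell_log to get a slot, then either index the table directly or scale its last cells for oversized packets. A userspace sketch of the same arithmetic, with a simplified struct and hypothetical names:

/* Not the kernel function itself, just the slot arithmetic it performs.
 * Field names loosely mirror tc_sizespec. */
#include <stdio.h>

struct sizespec {
    int cell_align;        /* added before shifting, may be negative */
    int cell_log;          /* log2 of the cell size                  */
    unsigned int tsize;    /* number of entries in data[]            */
};

static unsigned int stab_lookup(const struct sizespec *sz,
                                const unsigned short *data, int pkt_len)
{
    int slot = pkt_len + sz->cell_align;

    if (slot < 0)
        slot = 0;
    slot >>= sz->cell_log;                 /* bytes -> table slot */

    if ((unsigned int)slot < sz->tsize)
        return data[slot];
    /* Past the end of the table: scale using the last cells. */
    return data[sz->tsize - 1] * (slot / sz->tsize) +
           data[slot % sz->tsize];
}

int main(void)
{
    const struct sizespec sz = { .cell_align = 0, .cell_log = 6, .tsize = 4 };
    const unsigned short data[4] = { 64, 128, 192, 256 };

    printf("%u\n", stab_lookup(&sz, data, 100));   /* slot 1 -> prints 128 */
    return 0;
}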
sch_qfq.c
  qfq_slot_insert():
    866  u64 slot = (roundedS - grp->S) >> grp->slot_shift;  (local)
    869  if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
    874  slot = QFQ_MAX_SLOTS - 2;
    877  i = (grp->front + slot) % QFQ_MAX_SLOTS;
    880  __set_bit(slot, &grp->full_slots);
  qfq_drop_from_slot():
    1398  struct hlist_head *slot)  (argument)
    1404  hlist_for_each_entry(agg, slot, next) {
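qfq_slot_insert() maps a rounded start time onto a fixed-size circular array of slots: the offset from the group's start time is shifted by the slot granularity, clamped, and folded modulo the array size, with a bitmap recording which slots are occupied. A sketch under illustrative names (MAX_SLOTS and struct group are stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 32

struct group {
    uint64_t S;               /* virtual start time of the group          */
    unsigned int slot_shift;  /* granularity of one slot, as a shift      */
    unsigned int front;       /* index of the slot holding S              */
    unsigned long full_slots; /* bit i set => slot (front + i) non-empty  */
};

static unsigned int slot_index(struct group *grp, uint64_t roundedS)
{
    uint64_t slot = (roundedS - grp->S) >> grp->slot_shift;

    if (slot > MAX_SLOTS - 2)        /* too far in the future: clamp */
        slot = MAX_SLOTS - 2;

    grp->full_slots |= 1UL << slot;  /* remember the slot is occupied */
    return (grp->front + slot) % MAX_SLOTS;
}

int main(void)
{
    struct group grp = { .S = 0, .slot_shift = 10, .front = 5 };

    printf("%u\n", slot_index(&grp, 3 << 10));   /* offset 3 -> prints 8 */
    return 0;
}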
/net/ipv4/
inet_timewait_sock.c
  inet_twdr_do_twkill_work():
    216  const int slot)  (argument)
    231  inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
  inet_twdr_hangman():
    274  if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
    275  twdr->thread_slots |= (1 << twdr->slot);
    282  twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
  inet_twsk_schedule():
    345  int slot;  (local)
    371  slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
    381  if (slot >= INET_TWDR_RECYCLE_SLOTS) {
    384  slot = INET_TWDR_TWKILL_SLOTS - 1;
    386  slot = DIV_ROUND_UP(timeo, twdr->period);
  (additional matches omitted)
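inet_twsk_schedule() picks a timer-wheel slot by rounding the timeout up to a recycle tick, falling back to the coarser twkill wheel when the timeout is too long. A condensed userspace reading of those fragments; the constants and the single-function shape are illustrative, and the real code also arms the matching timer:

#include <stdio.h>

#define RECYCLE_TICK   7          /* one recycle tick = 2^7 jiffies   */
#define RECYCLE_SLOTS  (1 << 5)   /* size of the fine-grained wheel   */
#define TWKILL_SLOTS   8          /* size of the slow twkill wheel    */

static int timewait_slot(unsigned long timeo, unsigned long period)
{
    /* Fine-grained wheel: round the timeout up to the next recycle tick. */
    int slot = (timeo + (1 << RECYCLE_TICK) - 1) >> RECYCLE_TICK;

    if (slot >= RECYCLE_SLOTS) {
        /* Too long for the recycle wheel: use the slow wheel instead. */
        slot = (timeo + period - 1) / period;   /* DIV_ROUND_UP(timeo, period) */
        if (slot >= TWKILL_SLOTS)
            slot = TWKILL_SLOTS - 1;
    }
    return slot;
}

int main(void)
{
    printf("%d\n", timewait_slot(300, 7500));     /* short timeout -> slot 3 */
    printf("%d\n", timewait_slot(60000, 7500));   /* long timeout  -> slot 7 */
    return 0;
}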
udp_diag.c
  udp_dump():
     98  int num, s_num, slot, s_slot;  (local)
    105  for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
    108  struct udp_hslot *hslot = &table->hash[slot];
    143  cb->args[0] = slot;
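udp_dump() (and unix_diag_dump() under /net/unix/ below) walks a bucketed table while keeping the current slot and in-slot position in the netlink callback's args, so a dump that fills its buffer can resume where it stopped. A hypothetical userspace rendition of that cursor pattern:

#include <stdio.h>

struct cursor { unsigned int s_slot, s_num; };

/* Emit up to 'budget' entries, return how many were emitted. */
static int dump(int table[][4], unsigned int slots,
                struct cursor *cb, int budget)
{
    int emitted = 0;
    unsigned int slot, num = 0, s_num = cb->s_num;

    for (slot = cb->s_slot; slot < slots; s_num = 0, slot++) {
        for (num = s_num; num < 4; num++) {
            if (emitted == budget)
                goto done;               /* buffer full: remember position */
            printf("slot %u entry %d\n", slot, table[slot][num]);
            emitted++;
        }
    }
done:
    cb->s_slot = slot;                   /* resume here on the next call */
    cb->s_num = num;
    return emitted;
}

int main(void)
{
    int table[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
    struct cursor cb = { 0, 0 };

    dump(table, 2, &cb, 3);              /* first batch: three entries     */
    dump(table, 2, &cb, 8);              /* resumes at slot 0, entry index 3 */
    return 0;
}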
inet_hashtables.c
  __inet_lookup_established():
    247  unsigned int slot = hash & hashinfo->ehash_mask;  (local)
    248  struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
    272  if (get_nulls_value(node) != slot)
    301  if (get_nulls_value(node) != slot)
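The repeated get_nulls_value(node) != slot checks here, and in the udp, inet6 and llc lookups below, are the hlist_nulls idiom: each hash chain ends in an odd "nulls" marker that encodes the chain's slot, and a lockless reader that drifted onto another chain notices the mismatch and restarts. A single-threaded illustration of just the encoding, with made-up names; the real lists are RCU-protected and the retry only matters under concurrent moves:

#include <stdint.h>
#include <stdio.h>

struct node {
    struct node *next;      /* real pointer, or odd "nulls" marker */
    int key;
};

#define NULLS_MARKER(slot)  ((struct node *)(((uintptr_t)(slot) << 1) | 1))
#define IS_NULLS(ptr)       ((uintptr_t)(ptr) & 1)
#define NULLS_VALUE(ptr)    ((uintptr_t)(ptr) >> 1)

static struct node *lookup(struct node **hash, unsigned int mask, int key)
{
    unsigned int slot = (unsigned int)key & mask;
    struct node *n;

begin:
    for (n = hash[slot]; !IS_NULLS(n); n = n->next)
        if (n->key == key)
            return n;
    /* Ended on a marker belonging to another chain: restart the walk. */
    if (NULLS_VALUE(n) != slot)
        goto begin;
    return NULL;
}

int main(void)
{
    struct node b = { NULLS_MARKER(1), 5 };
    struct node a = { &b, 1 };
    struct node *hash[2] = { NULLS_MARKER(0), &a };   /* slot 1: a -> b */

    printf("%d\n", lookup(hash, 1, 5)->key);          /* finds key 5 */
    return 0;
}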
udp.c
  __udp4_lib_lookup():
    471  unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);  (local)
    472  struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
    528  if (get_nulls_value(node) != slot)
/net/unix/
diag.c
  unix_diag_dump():
    182  int num, s_num, slot, s_slot;  (local)
    191  for (slot = s_slot;
    192  slot < ARRAY_SIZE(unix_socket_table);
    193  s_num = 0, slot++) {
    197  sk_for_each(sk, &unix_socket_table[slot]) {
    215  cb->args[0] = slot;
/net/ipv6/
inet6_hashtables.c
  __inet6_lookup_established():
     83  unsigned int slot = hash & hashinfo->ehash_mask;  (local)
     84  struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
    103  if (get_nulls_value(node) != slot)
    125  if (get_nulls_value(node) != slot)
udp.c
  __udp6_lib_lookup():
    262  unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);  (local)
    263  struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
    318  if (get_nulls_value(node) != slot)
/net/llc/
llc_conn.c
  __llc_lookup_established():
    502  int slot = llc_sk_laddr_hashfn(sap, laddr);  (local)
    503  struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
    526  if (unlikely(get_nulls_value(node) != slot))
  __llc_lookup_listener():
    561  int slot = llc_sk_laddr_hashfn(sap, laddr);  (local)
    562  struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
    585  if (unlikely(get_nulls_value(node) != slot))
llc_sap.c
  llc_lookup_dgram():
    321  int slot = llc_sk_laddr_hashfn(sap, laddr);  (local)
    322  struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
    345  if (unlikely(get_nulls_value(node) != slot))
/net/irda/
irlap.c
  irlap_generate_rand_time_slot():
    676  int slot;  (local)
    684  slot = s + rand % (S-s);
    686  IRDA_ASSERT((slot >= s) || (slot < S), return 0;);
    688  return slot;
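irlap_generate_rand_time_slot() answers a discovery sweep of S slots by picking a random slot in [s, S), s being the current slot. A trivial sketch with rand() standing in for the kernel's own counter; it assumes s < S:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int generate_rand_time_slot(int s, int S)
{
    /* Pick a slot somewhere in [s, S); caller guarantees s < S. */
    return s + rand() % (S - s);
}

int main(void)
{
    srand((unsigned)time(NULL));
    printf("answering in slot %d of 16\n", generate_rand_time_slot(3, 16));
    return 0;
}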
irlap_event.c
  irlap_state_ndm():
    405  self->slot = irlap_generate_rand_time_slot(info->S,
    407  if (self->slot == info->s) {
    412  self->slot,
  irlap_state_reply():
    700  if ((info->s >= self->slot) && (!self->frame_sent)) {
    705  self->slot,
/net/bridge/
br_multicast.c
  br_multicast_add_router():
    1027  struct hlist_node *slot = NULL;  (local)
    1032  slot = &p->rlist;
    1035  if (slot)
    1036  hlist_add_after_rcu(slot, &port->rlist);
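br_multicast_add_router() keeps the router list ordered: it remembers the last node that should precede the new port ("slot") and links after it, or at the head when none qualifies. A sketch with a plain singly-linked list and a hypothetical port_no as the ordering key, since the fragment does not show the real comparison:

#include <stdio.h>

struct port {
    int port_no;
    struct port *next;
};

static void add_router(struct port **head, struct port *new)
{
    struct port *p, *slot = NULL;

    for (p = *head; p; p = p->next) {
        if (p->port_no >= new->port_no)
            break;
        slot = p;                 /* last entry with a smaller key */
    }

    if (slot) {
        new->next = slot->next;   /* insert after slot */
        slot->next = new;
    } else {
        new->next = *head;        /* smallest so far: new list head */
        *head = new;
    }
}

int main(void)
{
    struct port a = { 1, NULL }, c = { 3, NULL }, b = { 2, NULL };
    struct port *head = NULL;

    add_router(&head, &a);
    add_router(&head, &c);
    add_router(&head, &b);
    for (struct port *p = head; p; p = p->next)
        printf("%d ", p->port_no);      /* prints: 1 2 3 */
    printf("\n");
    return 0;
}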