/net/mptcp/ |
D | token.c |
    111  struct token_bucket *bucket;    in mptcp_token_new_request() local
    122  bucket = token_bucket(token);    in mptcp_token_new_request()
    123  spin_lock_bh(&bucket->lock);    in mptcp_token_new_request()
    124  if (__token_bucket_busy(bucket, token)) {    in mptcp_token_new_request()
    125  spin_unlock_bh(&bucket->lock);    in mptcp_token_new_request()
    129  hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);    in mptcp_token_new_request()
    130  bucket->chain_len++;    in mptcp_token_new_request()
    131  spin_unlock_bh(&bucket->lock);    in mptcp_token_new_request()
    156  struct token_bucket *bucket;    in mptcp_token_new_connect() local
    162  bucket = token_bucket(subflow->token);    in mptcp_token_new_connect()
    [all …]
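These matches trace a single pattern: mptcp_token_new_request() hashes the token to one bucket, takes that bucket's spinlock, refuses the token if the chain is already busy with it, and otherwise chains the request at the head and bumps chain_len. A minimal, self-contained sketch of that per-bucket-lock insert, using plain C with pthreads rather than the kernel's spinlock and hlist_nulls APIs (token_table, token_insert and the table size are illustrative names only):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define NBUCKETS 256

    struct token_node {
        uint32_t token;
        struct token_node *next;
    };

    struct token_bucket {
        pthread_mutex_t lock;            /* protects chain and chain_len */
        struct token_node *chain;
        unsigned int chain_len;
    };

    static struct token_bucket token_table[NBUCKETS];

    static void token_table_init(void)
    {
        for (int i = 0; i < NBUCKETS; i++)
            pthread_mutex_init(&token_table[i].lock, NULL);
    }

    /* Insert a token; refuse it if the same token is already chained. */
    static bool token_insert(uint32_t token)
    {
        struct token_bucket *b = &token_table[token % NBUCKETS];
        struct token_node *n;
        bool ok = false;

        pthread_mutex_lock(&b->lock);
        for (n = b->chain; n; n = n->next)
            if (n->token == token)
                goto out;                /* bucket is "busy" for this token */

        n = malloc(sizeof(*n));
        if (!n)
            goto out;
        n->token = token;
        n->next = b->chain;              /* add at the head of the chain */
        b->chain = n;
        b->chain_len++;
        ok = true;
    out:
        pthread_mutex_unlock(&b->lock);
        return ok;
    }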
|
/net/ceph/crush/ |
D | mapper.c |
    74   static int bucket_perm_choose(const struct crush_bucket *bucket,    in bucket_perm_choose() argument
    78   unsigned int pr = r % bucket->size;    in bucket_perm_choose()
    83   dprintk("bucket %d new x=%d\n", bucket->id, x);    in bucket_perm_choose()
    88   s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %    in bucket_perm_choose()
    89   bucket->size;    in bucket_perm_choose()
    95   for (i = 0; i < bucket->size; i++)    in bucket_perm_choose()
    100  for (i = 1; i < bucket->size; i++)    in bucket_perm_choose()
    112  if (p < bucket->size - 1) {    in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %    in bucket_perm_choose()
    114  (bucket->size - p);    in bucket_perm_choose()
    [all …]
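bucket_perm_choose() picks items from a CRUSH bucket deterministically: it hashes the input value x together with the bucket id (and, for later replicas, the position p) and reduces the result modulo the remaining bucket size, building a per-x permutation as it goes. A rough sketch of just the hash-then-modulo pick, without the permutation bookkeeping (mix3() is an arbitrary stand-in for crush_hash32_3(), not the real CRUSH hash):

    #include <stdint.h>

    /* Stand-in mixer: the kernel uses crush_hash32_3(); this is only illustrative. */
    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 2654435761u;
        h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
        h ^= c + 0x9e3779b9u + (h << 6) + (h >> 2);
        return h;
    }

    /*
     * Deterministically choose an item index in [0, size) for input x,
     * bucket id and replica rank r: the same inputs always map to the
     * same item, which is what makes the placement reproducible.
     */
    static unsigned int bucket_choose(uint32_t x, int32_t id, uint32_t r,
                                      unsigned int size)
    {
        return mix3(x, (uint32_t)id, r) % size;
    }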
|
/net/sched/ |
D | sch_hhf.c |
    329  static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)    in dequeue_head() argument
    331  struct sk_buff *skb = bucket->head;    in dequeue_head()
    333  bucket->head = skb->next;    in dequeue_head()
    339  static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)    in bucket_add() argument
    341  if (bucket->head == NULL)    in bucket_add()
    342  bucket->head = skb;    in bucket_add()
    344  bucket->tail->next = skb;    in bucket_add()
    345  bucket->tail = skb;    in bucket_add()
    352  struct wdrr_bucket *bucket;    in hhf_drop() local
    355  bucket = &q->buckets[WDRR_BUCKET_FOR_HH];    in hhf_drop()
    [all …]
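dequeue_head() and bucket_add() keep each WDRR bucket as a bare singly linked FIFO: enqueue at the tail, dequeue from the head. The same queue shape, sketched with generic nodes instead of sk_buff:

    #include <stddef.h>

    struct node {
        struct node *next;
    };

    struct fifo_bucket {
        struct node *head;
        struct node *tail;
    };

    /* Append at the tail; an empty bucket gets both head and tail set. */
    static void bucket_add(struct fifo_bucket *b, struct node *n)
    {
        n->next = NULL;
        if (b->head == NULL)
            b->head = n;
        else
            b->tail->next = n;
        b->tail = n;
    }

    /* Pop from the head; returns NULL when the bucket is empty. */
    static struct node *dequeue_head(struct fifo_bucket *b)
    {
        struct node *n = b->head;

        if (n) {
            b->head = n->next;
            n->next = NULL;
        }
        return n;
    }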
|
/net/9p/ |
D | error.c |
    181  int bucket;    in p9_error_init() local
    184  for (bucket = 0; bucket < ERRHASHSZ; bucket++)    in p9_error_init()
    185  INIT_HLIST_HEAD(&hash_errmap[bucket]);    in p9_error_init()
    190  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;    in p9_error_init()
    192  hlist_add_head(&c->list, &hash_errmap[bucket]);    in p9_error_init()
    210  int bucket;    in p9_errstr2errno() local
    214  bucket = jhash(errstr, len, 0) % ERRHASHSZ;    in p9_errstr2errno()
    215  hlist_for_each_entry(c, &hash_errmap[bucket], list) {    in p9_errstr2errno()
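p9_error_init() buckets every error string by jhash(name) % ERRHASHSZ, and p9_errstr2errno() later rehashes the incoming string so it only has to walk the matching chain. A small sketch of that string-keyed lookup (djb2 stands in for jhash; the struct layout and return convention are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap {
        const char *name;
        int errnum;
        struct errmap *next;
    };

    static struct errmap *hash_errmap[ERRHASHSZ];

    /* djb2 used as a stand-in for the kernel's jhash(). */
    static uint32_t strhash(const char *s, size_t len)
    {
        uint32_t h = 5381;

        while (len--)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static void errmap_add(struct errmap *e)
    {
        uint32_t b = strhash(e->name, strlen(e->name)) % ERRHASHSZ;

        e->next = hash_errmap[b];
        hash_errmap[b] = e;
    }

    /* Map an error string back to a number; 0 means "not found" here. */
    static int errstr2errno(const char *errstr, size_t len)
    {
        uint32_t b = strhash(errstr, len) % ERRHASHSZ;
        struct errmap *e;

        for (e = hash_errmap[b]; e; e = e->next)
            if (strlen(e->name) == len && !memcmp(e->name, errstr, len))
                return e->errnum;
        return 0;
    }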
|
/net/vmw_vsock/ |
D | diag.c |
    52   unsigned int bucket;    in vsock_diag_dump() local
    63   bucket = cb->args[1];    in vsock_diag_dump()
    72   while (bucket < ARRAY_SIZE(vsock_bind_table)) {    in vsock_diag_dump()
    73   struct list_head *head = &vsock_bind_table[bucket];    in vsock_diag_dump()
    94   bucket++;    in vsock_diag_dump()
    98   bucket = 0;    in vsock_diag_dump()
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {    in vsock_diag_dump()
    103  struct list_head *head = &vsock_connected_table[bucket];    in vsock_diag_dump()
    128  bucket++;    in vsock_diag_dump()
    135  cb->args[1] = bucket;    in vsock_diag_dump()
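vsock_diag_dump() walks the bind table and then the connected table one bucket at a time, saving the current bucket in cb->args[1] so a later dump call can resume where the previous one filled its buffer. The cursor logic, stripped of the socket and netlink details (table, struct cursor and emit() are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    #define NBUCKETS 64

    struct entry {
        struct entry *next;
    };

    struct cursor {
        unsigned int bucket;   /* which chain we are in            */
        unsigned int index;    /* how many entries already emitted */
    };

    static struct entry *table[NBUCKETS];

    /* emit() returns false when the output buffer is full and we must stop. */
    static void dump_resumable(struct cursor *c, bool (*emit)(struct entry *))
    {
        while (c->bucket < NBUCKETS) {
            struct entry *e = table[c->bucket];
            unsigned int i = 0;

            for (; e; e = e->next, i++) {
                if (i < c->index)        /* skip what was already sent */
                    continue;
                if (!emit(e)) {
                    c->index = i;        /* resume from here next call */
                    return;
                }
            }
            c->bucket++;
            c->index = 0;                /* a new bucket starts at 0 */
        }
    }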
|
/net/rxrpc/ |
D | proc.c |
    250  unsigned int bucket, n;    in rxrpc_peer_seq_start() local
    260  bucket = *_pos >> shift;    in rxrpc_peer_seq_start()
    262  if (bucket >= HASH_SIZE(rxnet->peer_hash)) {    in rxrpc_peer_seq_start()
    267  if (bucket == 0)    in rxrpc_peer_seq_start()
    273  p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);    in rxrpc_peer_seq_start()
    276  bucket++;    in rxrpc_peer_seq_start()
    278  *_pos = (bucket << shift) | n;    in rxrpc_peer_seq_start()
    285  unsigned int bucket, n;    in rxrpc_peer_seq_next() local
    292  bucket = *_pos >> shift;    in rxrpc_peer_seq_next()
    294  p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);    in rxrpc_peer_seq_next()
    [all …]
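rxrpc_peer_seq_start() packs both coordinates of the iteration into the single seq_file position: *_pos = (bucket << shift) | n, so the high bits name the hash bucket and the low bits the offset inside it. A tiny sketch of that encoding (POS_SHIFT is an arbitrary value chosen for the sketch, not what the kernel uses):

    #include <stdint.h>

    /* With an 8-bit shift, the low byte indexes into the current bucket. */
    #define POS_SHIFT 8

    static inline uint64_t pos_encode(unsigned int bucket, unsigned int n)
    {
        return ((uint64_t)bucket << POS_SHIFT) | n;
    }

    static inline unsigned int pos_bucket(uint64_t pos)
    {
        return pos >> POS_SHIFT;
    }

    static inline unsigned int pos_index(uint64_t pos)
    {
        return pos & ((1u << POS_SHIFT) - 1);
    }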
|
/net/ipv4/ |
D | nexthop.c |
    166  struct nh_res_bucket *bucket = &res_table->nh_buckets[i];    in nh_notifier_res_table_info_init() local
    170  nhge = rtnl_dereference(bucket->nh_entry);    in nh_notifier_res_table_info_init()
    867  static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)    in nh_res_bucket_used_time() argument
    869  return (unsigned long)atomic_long_read(&bucket->used_time);    in nh_res_bucket_used_time()
    874  const struct nh_res_bucket *bucket,    in nh_res_bucket_idle_point() argument
    877  unsigned long time = nh_res_bucket_used_time(bucket);    in nh_res_bucket_idle_point()
    880  if (time == bucket->migrated_time)    in nh_res_bucket_idle_point()
    893  struct nh_res_bucket *bucket)    in nh_res_bucket_set_idle() argument
    897  atomic_long_set(&bucket->used_time, (long)now);    in nh_res_bucket_set_idle()
    898  bucket->migrated_time = now;    in nh_res_bucket_set_idle()
    [all …]
|
D | tcp_ipv4.c |
    2344  for (; st->bucket <= tcp_hashinfo.lhash2_mask; st->bucket++) {    in listening_get_first()
    2349  ilb2 = &tcp_hashinfo.lhash2[st->bucket];    in listening_get_first()
    2387  ilb2 = &tcp_hashinfo.lhash2[st->bucket];    in listening_get_next()
    2389  ++st->bucket;    in listening_get_next()
    2398  st->bucket = 0;    in listening_get_idx()
    2411  return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);    in empty_bucket()
    2423  for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {    in established_get_first()
    2426  spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);    in established_get_first()
    2433  sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {    in established_get_first()
    2459  spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));    in established_get_next()
    [all …]
|
/net/core/ |
D | sock_map.c |
    883  struct bpf_shtab_bucket *bucket;    in __sock_hash_lookup_elem() local
    889  bucket = sock_hash_select_bucket(htab, hash);    in __sock_hash_lookup_elem()
    890  elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);    in __sock_hash_lookup_elem()
    907  struct bpf_shtab_bucket *bucket;    in sock_hash_delete_from_link() local
    910  bucket = sock_hash_select_bucket(htab, elem->hash);    in sock_hash_delete_from_link()
    916  raw_spin_lock_bh(&bucket->lock);    in sock_hash_delete_from_link()
    917  elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,    in sock_hash_delete_from_link()
    924  raw_spin_unlock_bh(&bucket->lock);    in sock_hash_delete_from_link()
    931  struct bpf_shtab_bucket *bucket;    in sock_hash_delete_elem() local
    937  bucket = sock_hash_select_bucket(htab, hash);    in sock_hash_delete_elem()
    [all …]
|
D | net-procfs.c |
    32  unsigned int bucket;    in dev_from_bucket() local
    39  bucket = get_bucket(*pos) + 1;    in dev_from_bucket()
    40  *pos = set_bucket_offset(bucket, 1);    in dev_from_bucket()
    41  } while (bucket < NETDEV_HASHENTRIES);    in dev_from_bucket()
|
/net/atm/ |
D | proc.c |
    69   int bucket;    member
    78   static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)    in __vcc_walk() argument
    83   for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {    in __vcc_walk()
    84   struct hlist_head *head = &vcc_hash[*bucket];    in __vcc_walk()
    98   if (!sk && ++*bucket < VCC_HTABLE_SIZE) {    in __vcc_walk()
    99   sk = sk_head(&vcc_hash[*bucket]);    in __vcc_walk()
    113  return __vcc_walk(&state->sk, family, &state->bucket, l) ?    in vcc_walk()
|
/net/ipv6/ |
D | route.c |
    1442  static void rt6_remove_exception(struct rt6_exception_bucket *bucket,    in rt6_remove_exception() argument
    1448  if (!bucket || !rt6_ex)    in rt6_remove_exception()
    1464  WARN_ON_ONCE(!bucket->depth);    in rt6_remove_exception()
    1465  bucket->depth--;    in rt6_remove_exception()
    1471  static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)    in rt6_exception_remove_oldest() argument
    1475  if (!bucket)    in rt6_exception_remove_oldest()
    1478  hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {    in rt6_exception_remove_oldest()
    1482  rt6_remove_exception(bucket, oldest);    in rt6_exception_remove_oldest()
    1514  __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,    in __rt6_find_exception_spinlock() argument
    1521  if (!(*bucket) || !daddr)    in __rt6_find_exception_spinlock()
    [all …]
|
D | ip6_flowlabel.c |
    750  int bucket;    member
    761  for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {    in ip6fl_get_first()
    762  for_each_fl_rcu(state->bucket, fl) {    in ip6fl_get_first()
    783  if (++state->bucket <= FL_HASH_MASK) {    in ip6fl_get_next()
    784  for_each_fl_rcu(state->bucket, fl) {    in ip6fl_get_next()
|
/net/netfilter/ |
D | xt_hashlimit.c |
    1056  unsigned int *bucket;    in dl_seq_start() local
    1062  bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);    in dl_seq_start()
    1063  if (!bucket)    in dl_seq_start()
    1066  *bucket = *pos;    in dl_seq_start()
    1067  return bucket;    in dl_seq_start()
    1073  unsigned int *bucket = v;    in dl_seq_next() local
    1075  *pos = ++(*bucket);    in dl_seq_next()
    1080  return bucket;    in dl_seq_next()
    1087  unsigned int *bucket = v;    in dl_seq_stop() local
    1089  if (!IS_ERR(bucket))    in dl_seq_stop()
    [all …]
|
D | xt_recent.c |
    476  unsigned int bucket;    member
    489  for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)    in recent_seq_start()
    490  list_for_each_entry(e, &t->iphash[st->bucket], list)    in recent_seq_start()
    504  while (head == &t->iphash[st->bucket]) {    in recent_seq_next()
    505  if (++st->bucket >= ip_list_hash_size)    in recent_seq_next()
    507  head = t->iphash[st->bucket].next;    in recent_seq_next()
|
D | nf_conntrack_expect.c |
    567  unsigned int bucket;    member
    575  for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {    in ct_expect_get_first()
    576  n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));    in ct_expect_get_first()
    590  if (++st->bucket >= nf_ct_expect_hsize)    in ct_expect_get_next()
    592  head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));    in ct_expect_get_next()
|
D | nf_conntrack_core.c |
    783   unsigned int bucket, hsize;    in ____nf_conntrack_find() local
    787   bucket = reciprocal_scale(hash, hsize);    in ____nf_conntrack_find()
    789   hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {    in ____nf_conntrack_find()
    806   if (get_nulls_value(n) != bucket) {    in ____nf_conntrack_find()
    1394  unsigned int i, bucket;    in early_drop() local
    1403  bucket = reciprocal_scale(hash, hsize);    in early_drop()
    1405  bucket = (bucket + 1) % hsize;    in early_drop()
    1407  drops = early_drop_list(net, &ct_hash[bucket]);    in early_drop()
    2329  void *data, unsigned int *bucket)    in get_next_corpse() argument
    2336  for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {    in get_next_corpse()
    [all …]
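____nf_conntrack_find() turns the 32-bit hash into a bucket index with reciprocal_scale(), which maps the value into [0, hsize) by a multiply-and-shift instead of a modulo, and it re-checks the nulls value at the end of the chain to detect that the lockless walk hopped into another bucket. The index calculation itself is one line; a sketch of the same multiply-high arithmetic:

    #include <stdint.h>

    /*
     * Map a uniformly distributed 32-bit hash onto [0, n) without a modulo:
     * take the high 32 bits of hash * n. This matches the arithmetic the
     * kernel's reciprocal_scale() performs.
     */
    static inline uint32_t scale_to_bucket(uint32_t hash, uint32_t n)
    {
        return (uint32_t)(((uint64_t)hash * n) >> 32);
    }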
|
D | nf_conntrack_standalone.c |
    104  unsigned int bucket;    member
    113  for (st->bucket = 0;    in ct_get_first()
    114  st->bucket < st->htable_size;    in ct_get_first()
    115  st->bucket++) {    in ct_get_first()
    117  hlist_nulls_first_rcu(&st->hash[st->bucket]));    in ct_get_first()
    131  if (likely(get_nulls_value(head) == st->bucket)) {    in ct_get_next()
    132  if (++st->bucket >= st->htable_size)    in ct_get_next()
    136  hlist_nulls_first_rcu(&st->hash[st->bucket]));    in ct_get_next()
|
/net/netfilter/ipvs/ |
D | ip_vs_lblc.c |
    103  struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */    member
    172  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);    in ip_vs_lblc_hash()
    185  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)    in ip_vs_lblc_get()
    239  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {    in ip_vs_lblc_flush()
    268  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {    in ip_vs_lblc_full_check()
    324  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {    in ip_vs_lblc_check_expire()
    363  INIT_HLIST_HEAD(&tbl->bucket[i]);    in ip_vs_lblc_init_svc()
|
D | ip_vs_lblcr.c |
    273  struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */    member
    335  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);    in ip_vs_lblcr_hash()
    348  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)    in ip_vs_lblcr_get()
    405  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {    in ip_vs_lblcr_flush()
    433  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {    in ip_vs_lblcr_full_check()
    488  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {    in ip_vs_lblcr_check_expire()
    526  INIT_HLIST_HEAD(&tbl->bucket[i]);    in ip_vs_lblcr_init_svc()
|
/net/openvswitch/ |
D | vport.c |
    97   struct hlist_head *bucket = hash_bucket(net, name);    in ovs_vport_locate() local
    100  hlist_for_each_entry_rcu(vport, bucket, hash_node,    in ovs_vport_locate()
    198  struct hlist_head *bucket;    in ovs_vport_add() local
    209  bucket = hash_bucket(ovs_dp_get_net(vport->dp),    in ovs_vport_add()
    211  hlist_add_head_rcu(&vport->hash_node, bucket);    in ovs_vport_add()
|
D | meter.c |
    395  band->bucket = band->burst_size * 1000ULL;    in dp_meter_create()
    396  band_max_delta_t = div_u64(band->bucket, band->rate);    in dp_meter_create()
    656  band->bucket += delta_ms * band->rate;    in ovs_meter_execute()
    657  if (band->bucket > max_bucket_size)    in ovs_meter_execute()
    658  band->bucket = max_bucket_size;    in ovs_meter_execute()
    660  if (band->bucket >= cost) {    in ovs_meter_execute()
    661  band->bucket -= cost;    in ovs_meter_execute()
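meter.c is a textbook token bucket: dp_meter_create() sizes the bucket from the burst, and ovs_meter_execute() refills it by delta_ms * rate, clamps it at the maximum, and only charges a packet against the band while the bucket still covers its cost. A minimal sketch of that accounting (field names and the millisecond units are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct meter_band {
        uint64_t rate;        /* tokens added per millisecond     */
        uint64_t max_bucket;  /* burst ceiling, in tokens         */
        uint64_t bucket;      /* tokens currently available       */
        uint64_t last_ms;     /* timestamp of the previous update */
    };

    /* Returns true if the packet fits in the band's budget, false otherwise. */
    static bool meter_charge(struct meter_band *b, uint64_t now_ms, uint64_t cost)
    {
        uint64_t delta_ms = now_ms - b->last_ms;

        b->last_ms = now_ms;
        b->bucket += delta_ms * b->rate;      /* refill for elapsed time */
        if (b->bucket > b->max_bucket)
            b->bucket = b->max_bucket;        /* cap at burst size */

        if (b->bucket >= cost) {
            b->bucket -= cost;                /* charge the packet */
            return true;
        }
        return false;
    }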
|
/net/batman-adv/ |
D | bridge_loop_avoidance.c |
    2196  struct batadv_hashtable *hash, unsigned int bucket,    in batadv_bla_claim_dump_bucket() argument
    2203  spin_lock_bh(&hash->list_locks[bucket]);    in batadv_bla_claim_dump_bucket()
    2206  hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {    in batadv_bla_claim_dump_bucket()
    2220  spin_unlock_bh(&hash->list_locks[bucket]);    in batadv_bla_claim_dump_bucket()
    2239  int bucket = cb->args[0];    in batadv_bla_claim_dump() local
    2264  while (bucket < hash->size) {    in batadv_bla_claim_dump()
    2266  hash, bucket, &idx))    in batadv_bla_claim_dump()
    2268  bucket++;    in batadv_bla_claim_dump()
    2271  cb->args[0] = bucket;    in batadv_bla_claim_dump()
    2367  unsigned int bucket, int *idx_skip)    in batadv_bla_backbone_dump_bucket() argument
    [all …]
|
D | distributed-arp-table.c |
    902  struct batadv_hashtable *hash, unsigned int bucket,    in batadv_dat_cache_dump_bucket() argument
    908  spin_lock_bh(&hash->list_locks[bucket]);    in batadv_dat_cache_dump_bucket()
    911  hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {    in batadv_dat_cache_dump_bucket()
    916  spin_unlock_bh(&hash->list_locks[bucket]);    in batadv_dat_cache_dump_bucket()
    925  spin_unlock_bh(&hash->list_locks[bucket]);    in batadv_dat_cache_dump_bucket()
    945  int bucket = cb->args[0];    in batadv_dat_cache_dump() local
    970  while (bucket < hash->size) {    in batadv_dat_cache_dump()
    971  if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,    in batadv_dat_cache_dump()
    975  bucket++;    in batadv_dat_cache_dump()
    979  cb->args[0] = bucket;    in batadv_dat_cache_dump()
|
/net/llc/ |
D | llc_proc.c |
    67  static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)    in laddr_hash_next() argument
    72  while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)    in laddr_hash_next()
    73  sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])    in laddr_hash_next()
|