/net/ceph/crush/ |
D | mapper.c |
     74  static int bucket_perm_choose(const struct crush_bucket *bucket,  in bucket_perm_choose() argument
     78  unsigned int pr = r % bucket->size;  in bucket_perm_choose()
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);  in bucket_perm_choose()
     88  s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %  in bucket_perm_choose()
     89  bucket->size;  in bucket_perm_choose()
     95  for (i = 0; i < bucket->size; i++)  in bucket_perm_choose()
    100  for (i = 1; i < bucket->size; i++)  in bucket_perm_choose()
    112  if (p < bucket->size - 1) {  in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %  in bucket_perm_choose()
    114  (bucket->size - p);  in bucket_perm_choose()
    [all …]
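The mapper.c hits above are from CRUSH's bucket_perm_choose(), which deterministically picks an item from a bucket by hashing the input value x, the bucket id, and the attempt number, lazily building a permutation for replicas r > 0. Below is a minimal userspace sketch of just the r == 0 fast path; hash32_3() is a stand-in mixer, not the kernel's crush_hash32_3().

#include <stdint.h>
#include <stdio.h>

/* Stand-in 3-input mixer; the kernel uses crush_hash32_3() instead. */
static uint32_t hash32_3(uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t h = a * 0x9e3779b1u;

    h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
    h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
    return h;
}

/* r == 0 fast path: map input x straight to one slot of the bucket. */
static unsigned int bucket_choose_first(uint32_t x, int bucket_id, unsigned int size)
{
    return hash32_3(x, (uint32_t)bucket_id, 0) % size;
}

int main(void)
{
    /* The same x always lands in the same slot of a 4-item bucket. */
    for (unsigned int x = 0; x < 4; x++)
        printf("x=%u -> item %u\n", x, bucket_choose_first(x, -2, 4));
    return 0;
}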
|
/net/sched/ |
D | sch_hhf.c |
    329  static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)  in dequeue_head() argument
    331  struct sk_buff *skb = bucket->head;  in dequeue_head()
    333  bucket->head = skb->next;  in dequeue_head()
    339  static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)  in bucket_add() argument
    341  if (bucket->head == NULL)  in bucket_add()
    342  bucket->head = skb;  in bucket_add()
    344  bucket->tail->next = skb;  in bucket_add()
    345  bucket->tail = skb;  in bucket_add()
    352  struct wdrr_bucket *bucket;  in hhf_drop() local
    355  bucket = &q->buckets[WDRR_BUCKET_FOR_HH];  in hhf_drop()
    [all …]
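The sch_hhf.c lines show a WDRR bucket kept as a bare singly linked FIFO of skbs with head/tail pointers. Below is a userspace sketch of the same two helpers, with a made-up struct pkt standing in for sk_buff.

#include <stddef.h>
#include <stdio.h>

struct pkt {
    struct pkt *next;
};

struct fifo_bucket {
    struct pkt *head;   /* oldest packet, dequeued first */
    struct pkt *tail;   /* newest packet, appended here  */
};

/* Pop the oldest packet; caller must check the bucket is non-empty. */
static struct pkt *dequeue_head(struct fifo_bucket *b)
{
    struct pkt *p = b->head;

    b->head = p->next;
    p->next = NULL;
    return p;
}

/* Append a packet; an empty bucket is recognised by head == NULL. */
static void bucket_add(struct fifo_bucket *b, struct pkt *p)
{
    if (b->head == NULL)
        b->head = p;
    else
        b->tail->next = p;
    b->tail = p;
    p->next = NULL;
}

int main(void)
{
    struct fifo_bucket b = { NULL, NULL };
    struct pkt a, c;

    bucket_add(&b, &a);
    bucket_add(&b, &c);
    printf("FIFO order ok: %d\n", dequeue_head(&b) == &a && dequeue_head(&b) == &c);
    return 0;
}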
|
/net/9p/ |
D | error.c |
    181  int bucket;  in p9_error_init() local
    184  for (bucket = 0; bucket < ERRHASHSZ; bucket++)  in p9_error_init()
    185  INIT_HLIST_HEAD(&hash_errmap[bucket]);  in p9_error_init()
    190  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;  in p9_error_init()
    192  hlist_add_head(&c->list, &hash_errmap[bucket]);  in p9_error_init()
    210  int bucket;  in p9_errstr2errno() local
    214  bucket = jhash(errstr, len, 0) % ERRHASHSZ;  in p9_errstr2errno()
    215  hlist_for_each_entry(c, &hash_errmap[bucket], list) {  in p9_errstr2errno()
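error.c hashes every known 9P error string into one of ERRHASHSZ chained buckets at init time, and p9_errstr2errno() later rehashes the string from the wire to find the matching errno. A self-contained sketch of that chained-bucket string map follows; it uses a djb2-style hash instead of the kernel's jhash(), and the sample entry is only illustrative.

#include <stdio.h>
#include <string.h>

#define ERRHASHSZ 32

struct errmap {
    const char *name;      /* error string carried in the 9P reply */
    int val;               /* corresponding errno                  */
    struct errmap *next;   /* chain within one bucket              */
};

static struct errmap *hash_errmap[ERRHASHSZ];

static unsigned int hash_str(const char *s)
{
    unsigned int h = 5381;  /* djb2; the kernel uses jhash() instead */

    while (*s)
        h = h * 33 + (unsigned char)*s++;
    return h % ERRHASHSZ;
}

static void error_add(struct errmap *c)
{
    unsigned int bucket = hash_str(c->name);

    c->next = hash_errmap[bucket];
    hash_errmap[bucket] = c;
}

static int errstr2errno(const char *errstr)
{
    unsigned int bucket = hash_str(errstr);
    struct errmap *c;

    for (c = hash_errmap[bucket]; c; c = c->next)
        if (strcmp(c->name, errstr) == 0)
            return c->val;
    return 0;   /* unknown error string */
}

int main(void)
{
    static struct errmap enoent = { "file not found", 2, NULL };

    error_add(&enoent);
    printf("%d\n", errstr2errno("file not found"));  /* prints 2 */
    return 0;
}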
|
/net/vmw_vsock/ |
D | diag.c |
     52  unsigned int bucket;  in vsock_diag_dump() local
     63  bucket = cb->args[1];  in vsock_diag_dump()
     72  while (bucket < ARRAY_SIZE(vsock_bind_table)) {  in vsock_diag_dump()
     73  struct list_head *head = &vsock_bind_table[bucket];  in vsock_diag_dump()
     94  bucket++;  in vsock_diag_dump()
     98  bucket = 0;  in vsock_diag_dump()
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {  in vsock_diag_dump()
    103  struct list_head *head = &vsock_connected_table[bucket];  in vsock_diag_dump()
    128  bucket++;  in vsock_diag_dump()
    135  cb->args[1] = bucket;  in vsock_diag_dump()
|
/net/rxrpc/ |
D | proc.c |
    256  unsigned int bucket, n;  in rxrpc_peer_seq_start() local
    266  bucket = *_pos >> shift;  in rxrpc_peer_seq_start()
    268  if (bucket >= HASH_SIZE(rxnet->peer_hash)) {  in rxrpc_peer_seq_start()
    273  if (bucket == 0)  in rxrpc_peer_seq_start()
    279  p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);  in rxrpc_peer_seq_start()
    282  bucket++;  in rxrpc_peer_seq_start()
    284  *_pos = (bucket << shift) | n;  in rxrpc_peer_seq_start()
    291  unsigned int bucket, n;  in rxrpc_peer_seq_next() local
    298  bucket = *_pos >> shift;  in rxrpc_peer_seq_next()
    300  p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);  in rxrpc_peer_seq_next()
    [all …]
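rxrpc's proc.c packs the peer-hash bucket and the offset inside that bucket into the single loff_t position that seq_file passes around: *_pos = (bucket << shift) | n. Below is a tiny sketch of the encode/decode pair, assuming a fixed 8-bit shift (rxrpc actually derives the shift from the hash-table size).

#include <stdio.h>

#define SHIFT 8   /* low 8 bits: index within the bucket; high bits: bucket number */

static long long pos_encode(unsigned int bucket, unsigned int n)
{
    return ((long long)bucket << SHIFT) | n;
}

static void pos_decode(long long pos, unsigned int *bucket, unsigned int *n)
{
    *bucket = (unsigned int)(pos >> SHIFT);
    *n = (unsigned int)(pos & ((1 << SHIFT) - 1));
}

int main(void)
{
    unsigned int bucket, n;

    pos_decode(pos_encode(3, 7), &bucket, &n);
    printf("bucket=%u n=%u\n", bucket, n);   /* bucket=3 n=7 */
    return 0;
}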
|
/net/core/ |
D | sock_map.c |
    608  struct bpf_htab_bucket *bucket;  in __sock_hash_lookup_elem() local
    614  bucket = sock_hash_select_bucket(htab, hash);  in __sock_hash_lookup_elem()
    615  elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);  in __sock_hash_lookup_elem()
    632  struct bpf_htab_bucket *bucket;  in sock_hash_delete_from_link() local
    635  bucket = sock_hash_select_bucket(htab, elem->hash);  in sock_hash_delete_from_link()
    641  raw_spin_lock_bh(&bucket->lock);  in sock_hash_delete_from_link()
    642  elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,  in sock_hash_delete_from_link()
    649  raw_spin_unlock_bh(&bucket->lock);  in sock_hash_delete_from_link()
    656  struct bpf_htab_bucket *bucket;  in sock_hash_delete_elem() local
    664  bucket = sock_hash_select_bucket(htab, hash);  in sock_hash_delete_elem()
    [all …]
|
D | net-procfs.c |
     35  unsigned int bucket;  in dev_from_bucket() local
     42  bucket = get_bucket(*pos) + 1;  in dev_from_bucket()
     43  *pos = set_bucket_offset(bucket, 1);  in dev_from_bucket()
     44  } while (bucket < NETDEV_HASHENTRIES);  in dev_from_bucket()
|
D | bpf_sk_storage.c |
     18  struct bucket {  struct
     49  struct bucket *buckets;
     93  static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,  in select_bucket()
    225  struct bucket *b;  in selem_unlink_map()
    242  struct bucket *b = select_bucket(smap, selem);  in selem_link_map()
    557  struct bucket *b;  in bpf_sk_storage_map_free()
|
/net/atm/ |
D | proc.c |
     69  int bucket;  member
     78  static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)  in __vcc_walk() argument
     83  for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {  in __vcc_walk()
     84  struct hlist_head *head = &vcc_hash[*bucket];  in __vcc_walk()
     98  if (!sk && ++*bucket < VCC_HTABLE_SIZE) {  in __vcc_walk()
     99  sk = sk_head(&vcc_hash[*bucket]);  in __vcc_walk()
    113  return __vcc_walk(&state->sk, family, &state->bucket, l) ?  in vcc_walk()
|
/net/ipv6/ |
D | route.c |
   1460  static void rt6_remove_exception(struct rt6_exception_bucket *bucket,  in rt6_remove_exception() argument
   1466  if (!bucket || !rt6_ex)  in rt6_remove_exception()
   1482  WARN_ON_ONCE(!bucket->depth);  in rt6_remove_exception()
   1483  bucket->depth--;  in rt6_remove_exception()
   1489  static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)  in rt6_exception_remove_oldest() argument
   1493  if (!bucket)  in rt6_exception_remove_oldest()
   1496  hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {  in rt6_exception_remove_oldest()
   1500  rt6_remove_exception(bucket, oldest);  in rt6_exception_remove_oldest()
   1532  __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,  in __rt6_find_exception_spinlock() argument
   1539  if (!(*bucket) || !daddr)  in __rt6_find_exception_spinlock()
   [all …]
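route.c keeps per-destination route exceptions in small hash buckets with a depth counter; when a bucket gets too deep, rt6_exception_remove_oldest() walks the chain and drops the stalest entry. Below is a simplified userspace sketch of that bookkeeping over a plain singly linked chain; struct exc and its stamp field are invented for the example.

#include <stddef.h>

struct exc {
    struct exc *next;
    unsigned long stamp;   /* last-use time; smaller means older */
};

struct exc_bucket {
    struct exc *chain;
    unsigned int depth;    /* number of entries in the chain */
};

/* Unlink one entry and keep the depth counter in step with the chain. */
static void exc_remove(struct exc_bucket *b, struct exc *victim)
{
    struct exc **pp = &b->chain;

    while (*pp && *pp != victim)
        pp = &(*pp)->next;
    if (*pp) {
        *pp = victim->next;
        b->depth--;   /* the kernel WARNs if depth was already 0 */
    }
}

/* Walk the chain, remember the entry with the smallest stamp, drop it. */
static void exc_remove_oldest(struct exc_bucket *b)
{
    struct exc *e, *oldest = NULL;

    for (e = b->chain; e; e = e->next)
        if (!oldest || e->stamp < oldest->stamp)
            oldest = e;
    if (oldest)
        exc_remove(b, oldest);
}

int main(void)
{
    struct exc a = { NULL, 10 }, c = { NULL, 5 };
    struct exc_bucket b = { NULL, 0 };

    a.next = &c;
    b.chain = &a;
    b.depth = 2;
    exc_remove_oldest(&b);   /* drops c, the older entry */
    return (b.chain == &a && b.depth == 1) ? 0 : 1;
}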
|
D | ip6_flowlabel.c |
    722  int bucket;  member
    733  for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {  in ip6fl_get_first()
    734  for_each_fl_rcu(state->bucket, fl) {  in ip6fl_get_first()
    755  if (++state->bucket <= FL_HASH_MASK) {  in ip6fl_get_next()
    756  for_each_fl_rcu(state->bucket, fl) {  in ip6fl_get_next()
|
D | ping.c |
    203  int bucket = ((struct ping_iter_state *) seq->private)->bucket;  in ping_v6_seq_show() local
    207  ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);  in ping_v6_seq_show()
|
/net/netfilter/ |
D | xt_hashlimit.c |
   1056  unsigned int *bucket;  in dl_seq_start() local
   1062  bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);  in dl_seq_start()
   1063  if (!bucket)  in dl_seq_start()
   1066  *bucket = *pos;  in dl_seq_start()
   1067  return bucket;  in dl_seq_start()
   1073  unsigned int *bucket = v;  in dl_seq_next() local
   1075  *pos = ++(*bucket);  in dl_seq_next()
   1080  return bucket;  in dl_seq_next()
   1087  unsigned int *bucket = v;  in dl_seq_stop() local
   1089  if (!IS_ERR(bucket))  in dl_seq_stop()
   [all …]
|
D | nf_conntrack_core.c |
    737  unsigned int bucket, hsize;  in ____nf_conntrack_find() local
    741  bucket = reciprocal_scale(hash, hsize);  in ____nf_conntrack_find()
    743  hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {  in ____nf_conntrack_find()
    760  if (get_nulls_value(n) != bucket) {  in ____nf_conntrack_find()
   1167  unsigned int i, bucket;  in early_drop() local
   1176  bucket = reciprocal_scale(hash, hsize);  in early_drop()
   1178  bucket = (bucket + 1) % hsize;  in early_drop()
   1180  drops = early_drop_list(net, &ct_hash[bucket]);  in early_drop()
   2048  void *data, unsigned int *bucket)  in get_next_corpse() argument
   2055  for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {  in get_next_corpse()
   [all …]
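Both ____nf_conntrack_find() and early_drop() turn the connection hash into a bucket index with reciprocal_scale(), which maps a full-range 32-bit value onto [0, hsize) using a widening multiply and shift rather than a modulo. A standalone sketch of that helper, using the same arithmetic as the kernel's:

#include <stdint.h>
#include <stdio.h>

/*
 * Map a full-range 32-bit hash value onto [0, n) without a modulo:
 * multiply into 64 bits and keep the high word. n must not be 0.
 */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
    return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
    uint32_t hsize = 16384;        /* pretend hash table size */
    uint32_t hash = 0xdeadbeef;    /* pretend connection hash */

    printf("bucket = %u\n", reciprocal_scale(hash, hsize));
    return 0;
}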
|
D | xt_recent.c |
    476  unsigned int bucket;  member
    489  for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)  in recent_seq_start()
    490  list_for_each_entry(e, &t->iphash[st->bucket], list)  in recent_seq_start()
    504  while (head == &t->iphash[st->bucket]) {  in recent_seq_next()
    505  if (++st->bucket >= ip_list_hash_size)  in recent_seq_next()
    507  head = t->iphash[st->bucket].next;  in recent_seq_next()
|
D | nf_conntrack_expect.c |
    547  unsigned int bucket;  member
    555  for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {  in ct_expect_get_first()
    556  n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_first()
    570  if (++st->bucket >= nf_ct_expect_hsize)  in ct_expect_get_next()
    572  head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_next()
|
D | nf_conntrack_standalone.c |
    101  unsigned int bucket;  member
    110  for (st->bucket = 0;  in ct_get_first()
    111  st->bucket < st->htable_size;  in ct_get_first()
    112  st->bucket++) {  in ct_get_first()
    114  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_first()
    128  if (likely(get_nulls_value(head) == st->bucket)) {  in ct_get_next()
    129  if (++st->bucket >= st->htable_size)  in ct_get_next()
    133  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_next()
|
/net/netfilter/ipvs/ |
D | ip_vs_lblc.c |
    103  struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */  member
    172  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblc_hash()
    185  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblc_get()
    239  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {  in ip_vs_lblc_flush()
    268  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblc_full_check()
    324  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblc_check_expire()
    363  INIT_HLIST_HEAD(&tbl->bucket[i]);  in ip_vs_lblc_init_svc()
|
D | ip_vs_lblcr.c |
    273  struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */  member
    335  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblcr_hash()
    348  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblcr_get()
    405  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {  in ip_vs_lblcr_flush()
    433  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblcr_full_check()
    488  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblcr_check_expire()
    526  INIT_HLIST_HEAD(&tbl->bucket[i]);  in ip_vs_lblcr_init_svc()
|
/net/openvswitch/ |
D | vport.c |
     96  struct hlist_head *bucket = hash_bucket(net, name);  in ovs_vport_locate() local
     99  hlist_for_each_entry_rcu(vport, bucket, hash_node)  in ovs_vport_locate()
    194  struct hlist_head *bucket;  in ovs_vport_add() local
    205  bucket = hash_bucket(ovs_dp_get_net(vport->dp),  in ovs_vport_add()
    207  hlist_add_head_rcu(&vport->hash_node, bucket);  in ovs_vport_add()
|
D | meter.c |
    254  band->bucket = (band->burst_size + band->rate) * 1000ULL;  in dp_meter_create()
    255  band_max_delta_t = div_u64(band->bucket, band->rate);  in dp_meter_create()
    503  band->bucket += delta_ms * band->rate;  in ovs_meter_execute()
    504  if (band->bucket > max_bucket_size)  in ovs_meter_execute()
    505  band->bucket = max_bucket_size;  in ovs_meter_execute()
    507  if (band->bucket >= cost) {  in ovs_meter_execute()
    508  band->bucket -= cost;  in ovs_meter_execute()
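meter.c implements a token bucket per band: ovs_meter_execute() tops the bucket up by the elapsed milliseconds times the band rate, clamps it to a maximum, and lets the packet through only if the bucket can cover its cost. Below is a userspace sketch of that refill-and-spend step; the field names follow the listing but the types and units are simplified.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct band {
    uint64_t bucket;       /* accumulated credit                 */
    uint64_t rate;         /* credit gained per millisecond      */
    uint64_t max_bucket;   /* burst ceiling the bucket clamps to */
};

/* Refill for delta_ms elapsed, then try to spend `cost` for one packet. */
static bool band_admit(struct band *b, uint64_t delta_ms, uint64_t cost)
{
    b->bucket += delta_ms * b->rate;
    if (b->bucket > b->max_bucket)
        b->bucket = b->max_bucket;

    if (b->bucket >= cost) {
        b->bucket -= cost;
        return true;    /* packet conforms to the band */
    }
    return false;       /* band exceeded: drop or remark */
}

int main(void)
{
    struct band b = { .bucket = 0, .rate = 10, .max_bucket = 1000 };

    /* 50 ms of credit at rate 10 pays for a packet costing 300. */
    printf("admitted: %d\n", band_admit(&b, 50, 300));
    return 0;
}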
|
/net/batman-adv/ |
D | bridge_loop_avoidance.c |
   2265  struct batadv_hashtable *hash, unsigned int bucket,  in batadv_bla_claim_dump_bucket() argument
   2272  spin_lock_bh(&hash->list_locks[bucket]);  in batadv_bla_claim_dump_bucket()
   2275  hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {  in batadv_bla_claim_dump_bucket()
   2289  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_bla_claim_dump_bucket()
   2308  int bucket = cb->args[0];  in batadv_bla_claim_dump() local
   2333  while (bucket < hash->size) {  in batadv_bla_claim_dump()
   2335  hash, bucket, &idx))  in batadv_bla_claim_dump()
   2337  bucket++;  in batadv_bla_claim_dump()
   2340  cb->args[0] = bucket;  in batadv_bla_claim_dump()
   2504  unsigned int bucket, int *idx_skip)  in batadv_bla_backbone_dump_bucket() argument
   [all …]
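batadv_bla_claim_dump() is the usual resumable netlink dump: the bucket to continue from is parked in cb->args[0], each pass walks buckets until the message runs out of room, and the next invocation picks up where it stopped. A schematic userspace sketch of that cursor pattern follows; dump_bucket() and the budget counter are stand-ins for the real per-bucket helper and the netlink message-size check.

#include <stdbool.h>
#include <stdio.h>

#define HASH_SIZE 8

/*
 * Pretend per-bucket dumper: returns false when the message is full
 * and the bucket must be revisited on the next dump call.
 */
static bool dump_bucket(unsigned int bucket, int *budget)
{
    if (*budget == 0)
        return false;
    (*budget)--;
    printf("dumped bucket %u\n", bucket);
    return true;
}

/* One dump pass; *cursor plays the role of cb->args[0]. */
static void dump_pass(unsigned int *cursor, int budget)
{
    unsigned int bucket = *cursor;

    while (bucket < HASH_SIZE) {
        if (!dump_bucket(bucket, &budget))
            break;      /* out of room: resume here on the next call */
        bucket++;
    }
    *cursor = bucket;
}

int main(void)
{
    unsigned int cursor = 0;

    dump_pass(&cursor, 3);   /* first call dumps buckets 0-2  */
    dump_pass(&cursor, 8);   /* second call resumes at bucket 3 */
    return 0;
}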
|
D | distributed-arp-table.c |
    954  struct batadv_hashtable *hash, unsigned int bucket,  in batadv_dat_cache_dump_bucket() argument
    960  spin_lock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    963  hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {  in batadv_dat_cache_dump_bucket()
    968  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    977  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    997  int bucket = cb->args[0];  in batadv_dat_cache_dump() local
   1022  while (bucket < hash->size) {  in batadv_dat_cache_dump()
   1023  if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,  in batadv_dat_cache_dump()
   1027  bucket++;  in batadv_dat_cache_dump()
   1031  cb->args[0] = bucket;  in batadv_dat_cache_dump()
|
/net/ipv4/ |
D | tcp_ipv4.c |
   2183  ilb = &tcp_hashinfo.listening_hash[st->bucket];  in listening_get_next()
   2189  ilb = &tcp_hashinfo.listening_hash[st->bucket];  in listening_get_next()
   2203  if (++st->bucket < INET_LHTABLE_SIZE)  in listening_get_next()
   2213  st->bucket = 0;  in listening_get_idx()
   2226  return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);  in empty_bucket()
   2241  for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {  in established_get_first()
   2244  spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);  in established_get_first()
   2251  sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {  in established_get_first()
   2284  spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));  in established_get_next()
   2285  ++st->bucket;  in established_get_next()
   [all …]
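established_get_first()/established_get_next() in tcp_ipv4.c walk the ehash table one bucket at a time for /proc/net/tcp, skipping empty buckets and holding only the current bucket's lock while its chain is traversed. Below is a stripped-down userspace sketch of that skip-and-lock walk, with pthread mutexes standing in for inet_ehash_lockp() and a trivial node list in place of the sock chains.

#include <stdio.h>
#include <pthread.h>

#define EHASH_SIZE 8

struct node {
    struct node *next;
};

struct walker {
    unsigned int bucket;   /* plays the role of st->bucket */
};

static struct node *ehash[EHASH_SIZE];
static pthread_mutex_t ehash_lock[EHASH_SIZE];

/* Find the first entry at or after w->bucket, skipping empty buckets. */
static struct node *established_get_first(struct walker *w)
{
    for (; w->bucket < EHASH_SIZE; w->bucket++) {
        struct node *n;

        if (!ehash[w->bucket])     /* cheap empty_bucket()-style test */
            continue;

        pthread_mutex_lock(&ehash_lock[w->bucket]);
        n = ehash[w->bucket];
        if (n)
            return n;              /* caller unlocks when done */
        pthread_mutex_unlock(&ehash_lock[w->bucket]);
    }
    return NULL;
}

int main(void)
{
    struct node a = { NULL };
    struct walker w = { .bucket = 0 };

    for (int i = 0; i < EHASH_SIZE; i++)
        pthread_mutex_init(&ehash_lock[i], NULL);
    ehash[5] = &a;                 /* only bucket 5 is populated */

    if (established_get_first(&w)) {
        printf("first entry found in bucket %u\n", w.bucket);
        pthread_mutex_unlock(&ehash_lock[w.bucket]);
    }
    return 0;
}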
|
/net/llc/ |
D | llc_proc.c |
     67  static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)  in laddr_hash_next() argument
     72  while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)  in laddr_hash_next()
     73  sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])  in laddr_hash_next()
|