/drivers/staging/lustre/lustre/obdclass/
lustre_handles.c
    66   struct handle_bucket *bucket;   in class_handle_hash() local
    93   bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];   in class_handle_hash()
    94   spin_lock(&bucket->lock);   in class_handle_hash()
    95   list_add_rcu(&h->h_link, &bucket->head);   in class_handle_hash()
    97   spin_unlock(&bucket->lock);   in class_handle_hash()
    127  struct handle_bucket *bucket;   in class_handle_unhash() local
    129  bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);   in class_handle_unhash()
    131  spin_lock(&bucket->lock);   in class_handle_unhash()
    133  spin_unlock(&bucket->lock);   in class_handle_unhash()
    139  struct handle_bucket *bucket;   in class_handle2object() local
    [all …]
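The lustre_handles.c hits show a cookie-keyed hash table whose buckets each carry their own spinlock: the cookie is masked down to a bucket index, that bucket's lock is taken, and the handle is added to the bucket's RCU list. Below is a minimal userspace sketch of the per-bucket-lock insert pattern, assuming a pthread mutex and a plain singly linked chain in place of the kernel's spinlock and list_head; the names mirror the listing, but the code is an approximation, not the Lustre implementation:

    /* Sketch of a cookie-hashed table with one lock per bucket,
     * loosely modelled on class_handle_hash(); userspace approximation. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HANDLE_HASH_SIZE 128                    /* power of two */
    #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

    struct handle {
        uint64_t h_cookie;
        struct handle *h_next;
    };

    struct handle_bucket {
        pthread_mutex_t lock;       /* protects 'head' only */
        struct handle *head;
    };

    static struct handle_bucket handle_hash[HANDLE_HASH_SIZE];

    static void handle_hash_add(struct handle *h)
    {
        struct handle_bucket *bucket =
            &handle_hash[h->h_cookie & HANDLE_HASH_MASK];

        pthread_mutex_lock(&bucket->lock);
        h->h_next = bucket->head;   /* push onto this bucket's chain */
        bucket->head = h;
        pthread_mutex_unlock(&bucket->lock);
    }

    int main(void)
    {
        for (int i = 0; i < HANDLE_HASH_SIZE; i++)
            pthread_mutex_init(&handle_hash[i].lock, NULL);

        struct handle h = { .h_cookie = 0xdeadbeef };
        handle_hash_add(&h);
        printf("cookie %#llx landed in bucket %llu\n",
               (unsigned long long)h.h_cookie,
               (unsigned long long)(h.h_cookie & HANDLE_HASH_MASK));
        return 0;
    }

Per-bucket locking keeps concurrent inserts for unrelated cookies from contending on one global lock, which is presumably why the Lustre code is laid out this way.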
/drivers/cpuidle/governors/
menu.c
    127  unsigned int bucket;   member
    144  int bucket = 0;   in which_bucket() local
    153  bucket = BUCKETS/2;   in which_bucket()
    156  return bucket;   in which_bucket()
    158  return bucket + 1;   in which_bucket()
    160  return bucket + 2;   in which_bucket()
    162  return bucket + 3;   in which_bucket()
    164  return bucket + 4;   in which_bucket()
    165  return bucket + 5;   in which_bucket()
    312  data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);   in menu_select()
    [all …]
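In the menu governor, which_bucket() maps the predicted idle duration onto one of a handful of histogram buckets, starting halfway up the range when tasks are waiting on I/O, and menu_select() stores the result in data->bucket. The sketch below reproduces that thresholding shape in plain C; the microsecond cut-offs are illustrative values rather than a verbatim copy of menu.c:

    /* Rough sketch of mapping a predicted idle time (us) to a bucket,
     * in the spirit of which_bucket(); thresholds are illustrative. */
    #include <stdio.h>

    #define BUCKETS 12

    static int which_bucket(unsigned int duration_us, unsigned int nr_iowaiters)
    {
        int bucket = 0;

        /* Start halfway up when tasks are blocked on I/O, mirroring the
         * "bucket = BUCKETS/2" hit in the listing above. */
        if (nr_iowaiters)
            bucket = BUCKETS / 2;

        if (duration_us < 10)
            return bucket;
        if (duration_us < 100)
            return bucket + 1;
        if (duration_us < 1000)
            return bucket + 2;
        if (duration_us < 10000)
            return bucket + 3;
        if (duration_us < 100000)
            return bucket + 4;
        return bucket + 5;
    }

    int main(void)
    {
        printf("500us, no iowait   -> bucket %d\n", which_bucket(500, 0));
        printf("500us, 3 iowaiters -> bucket %d\n", which_bucket(500, 3));
        return 0;
    }

Keeping separate buckets for the I/O-wait case lets the governor build distinct correction histories for workloads that tend to wake early.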
/drivers/md/bcache/
alloc.c
    74   uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)   in bch_inc_gen()
    87   struct bucket *b;   in bch_rescale_priorities()
    124  static inline bool can_inc_bucket_gen(struct bucket *b)   in can_inc_bucket_gen()
    129  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)   in bch_can_invalidate_bucket()
    139  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)   in __bch_invalidate_one_bucket()
    152  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)   in bch_invalidate_one_bucket()
    180  struct bucket *b;   in invalidate_buckets_lru()
    217  struct bucket *b;   in invalidate_buckets_fifo()
    240  struct bucket *b;   in invalidate_buckets_random()
    300  static int bch_allocator_push(struct cache *ca, long bucket)   in bch_allocator_push() argument
    [all …]
bcache.h
    194  struct bucket {   struct
    207  BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
    213  BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
    214  BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
    420  struct bucket *buckets;
    422  DECLARE_HEAP(struct bucket *, heap);
    731  static inline struct bucket *PTR_BUCKET(struct cache_set *c,   in PTR_BUCKET()
    829  static inline uint8_t bucket_gc_gen(struct bucket *b)   in bucket_gc_gen()
    864  uint8_t bch_inc_gen(struct cache *, struct bucket *);
    867  bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
    [all …]
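bcache.h packs several garbage-collection fields (a 2-bit mark, a sectors-used count and a move flag) into the single gc_mark word of struct bucket through the BITMASK() macro. The following is a userspace re-creation of that shift-and-mask packing; the macro body and the 13-bit width assumed for GC_SECTORS_USED_SIZE are illustrative, only the field offsets (0, 2 and 15) come from the hits above:

    /* Sketch of packing small fields into one word with shift/mask accessors,
     * in the spirit of bcache's BITMASK(); userspace reimplementation. */
    #include <stdint.h>
    #include <stdio.h>

    struct bucket {
        uint16_t gc_mark;  /* holds the mark, sectors-used and move bits */
    };

    #define BITMASK(name, type, field, offset, size)                      \
    static inline unsigned long long name(const type *b)                  \
    {                                                                      \
        return (b->field >> (offset)) & ~(~0ULL << (size));               \
    }                                                                      \
    static inline void SET_##name(type *b, unsigned long long v)          \
    {                                                                      \
        b->field &= ~(~(~0ULL << (size)) << (offset));                     \
        b->field |= (v & ~(~0ULL << (size))) << (offset);                  \
    }

    #define GC_SECTORS_USED_SIZE 13   /* assumed width for this sketch */

    BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2)
    BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE)
    BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1)

    int main(void)
    {
        struct bucket b = { 0 };

        SET_GC_MARK(&b, 2);
        SET_GC_SECTORS_USED(&b, 1000);
        SET_GC_MOVE(&b, 1);
        printf("mark=%llu used=%llu move=%llu raw=%#x\n",
               GC_MARK(&b), GC_SECTORS_USED(&b), GC_MOVE(&b),
               (unsigned)b.gc_mark);
        return 0;
    }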
extents.c
    53   size_t bucket = PTR_BUCKET_NR(c, k, i);   in __ptr_invalid() local
    57   bucket < ca->sb.first_bucket ||   in __ptr_invalid()
    58   bucket >= ca->sb.nbuckets)   in __ptr_invalid()
    74   size_t bucket = PTR_BUCKET_NR(c, k, i);   in bch_ptr_status() local
    79   if (bucket < ca->sb.first_bucket)   in bch_ptr_status()
    81   if (bucket >= ca->sb.nbuckets)   in bch_ptr_status()
    175  struct bucket *g;   in btree_ptr_bad_expensive()
    506  struct bucket *g = PTR_BUCKET(b->c, k, ptr);   in bch_extent_bad_expensive()
    536  struct bucket *g;   in bch_extent_bad()
movinggc.c
    188  static bool bucket_cmp(struct bucket *l, struct bucket *r)   in bucket_cmp()
    195  struct bucket *b;   in bucket_heap_top()
    202  struct bucket *b;   in bch_moving_gc()
super.c
    501  static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)   in prio_io() argument
    508  bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;   in prio_io()
    524  struct bucket *b;   in bch_prio_write()
    540  long bucket;   in bch_prio_write() local
    556  bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);   in bch_prio_write()
    557  BUG_ON(bucket == -1);   in bch_prio_write()
    560  prio_io(ca, bucket, REQ_WRITE);   in bch_prio_write()
    563  ca->prio_buckets[i] = bucket;   in bch_prio_write()
    564  atomic_dec_bug(&ca->buckets[bucket].pin);   in bch_prio_write()
    587  static void prio_read(struct cache *ca, uint64_t bucket)   in prio_read() argument
    [all …]
journal.c
    44   sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);   in journal_read_bucket() local
    55   bio->bi_iter.bi_sector = bucket + offset;   in journal_read_bucket()
btree.c
    1185  struct bucket *g;   in __bch_btree_mark_key()
    1241  struct bucket *b = PTR_BUCKET(c, k, i);   in bch_initial_mark_key()
    1646  struct bucket *b;   in btree_gc_start()
    1672  struct bucket *b;   in bch_btree_gc_finish()
    1864  struct bucket *b;   in bch_initial_gc_finish()
sysfs.c
    775  struct bucket *b;   in SHOW()
/drivers/md/persistent-data/
dm-transaction-manager.c
    106  unsigned bucket = dm_hash_block(b, DM_HASH_MASK);   in is_shadow() local
    110  hlist_for_each_entry(si, tm->buckets + bucket, hlist)   in is_shadow()
    126  unsigned bucket;   in insert_shadow() local
    132  bucket = dm_hash_block(b, DM_HASH_MASK);   in insert_shadow()
    134  hlist_add_head(&si->hlist, tm->buckets + bucket);   in insert_shadow()
    143  struct hlist_head *bucket;   in wipe_shadow_table() local
    148  bucket = tm->buckets + i;   in wipe_shadow_table()
    149  hlist_for_each_entry_safe(si, tmp, bucket, hlist)   in wipe_shadow_table()
    152  INIT_HLIST_HEAD(bucket);   in wipe_shadow_table()
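is_shadow() and insert_shadow() follow the standard chained-hash idiom: hash the block number down to a bucket index with a mask, then either walk the hlist at tm->buckets[bucket] or prepend to it. A self-contained sketch of the same structure, using plain next pointers instead of hlist; dm_hash_block() here is a made-up multiplicative hash, since the real helper is not shown in the listing:

    /* Sketch of the hash-and-walk-bucket idiom from is_shadow()/insert_shadow(),
     * with plain singly linked chains instead of the kernel's hlist. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DM_HASH_SIZE 256
    #define DM_HASH_MASK (DM_HASH_SIZE - 1)

    struct shadow_info {
        uint64_t where;               /* block number */
        struct shadow_info *next;
    };

    static struct shadow_info *buckets[DM_HASH_SIZE];

    static unsigned dm_hash_block(uint64_t b, unsigned mask)
    {
        /* Multiplicative hash for illustration; the real helper may differ. */
        return (unsigned)((b * 2654435761u) & mask);
    }

    static int is_shadow(uint64_t b)
    {
        unsigned bucket = dm_hash_block(b, DM_HASH_MASK);

        for (struct shadow_info *si = buckets[bucket]; si; si = si->next)
            if (si->where == b)
                return 1;
        return 0;
    }

    static void insert_shadow(uint64_t b)
    {
        unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
        struct shadow_info *si = malloc(sizeof(*si));

        si->where = b;
        si->next = buckets[bucket];   /* prepend, like hlist_add_head() */
        buckets[bucket] = si;
    }

    int main(void)
    {
        insert_shadow(42);
        printf("42 shadowed? %d, 43 shadowed? %d\n", is_shadow(42), is_shadow(43));
        return 0;
    }

The vmci_doorbell.c, fmr_pool.c and mlx4 entries further down use the same walk-or-prepend shape, differing only in how the bucket index is derived.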
/drivers/misc/vmw_vmci/
vmci_doorbell.c
    128  u32 bucket = VMCI_DOORBELL_HASH(idx);   in dbell_index_table_find() local
    131  hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],   in dbell_index_table_find()
    147  u32 bucket;   in dbell_index_table_add() local
    195  bucket = VMCI_DOORBELL_HASH(entry->idx);   in dbell_index_table_add()
    196  hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);   in dbell_index_table_add()
    360  u32 bucket = VMCI_DOORBELL_HASH(notify_idx);   in dbell_fire_entries() local
    365  hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {   in dbell_fire_entries()
/drivers/staging/rdma/hfi1/
trace.h
    331  TP_PROTO(struct hfi1_qp *qp, u32 bucket),
    332  TP_ARGS(qp, bucket),
    336  __field(u32, bucket)
    341  __entry->bucket = bucket;
    347  __entry->bucket
    352  TP_PROTO(struct hfi1_qp *qp, u32 bucket),
    353  TP_ARGS(qp, bucket)),
    356  TP_PROTO(struct hfi1_qp *qp, u32 bucket),
    357  TP_ARGS(qp, bucket));
/drivers/md/
dm-cache-policy-cleaner.c
    145  struct hlist_head *bucket = &hash->table[h];   in lookup_cache_entry() local
    147  hlist_for_each_entry(cur, bucket, hlist) {   in lookup_cache_entry()
    151  hlist_add_head(&cur->hlist, bucket);   in lookup_cache_entry()
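lookup_cache_entry() does one extra thing compared with the plainer lookups in this list: on a hit it re-links the entry at the head of its bucket (the hlist_add_head() call on line 151), so recently used entries are found quickly next time. The dm-cache-policy-mq.c entry below shows the same trick. Here is a sketch of that move-to-front behaviour on a simple singly linked chain; the unlink step is only implied by the elided lines and is made explicit here:

    /* Sketch of move-to-front on hash lookup, as suggested by the
     * hlist_add_head() call inside lookup_cache_entry(). */
    #include <stdint.h>
    #include <stdio.h>

    struct entry {
        uint64_t oblock;
        struct entry *next;
    };

    /* Find 'oblock' in the chain at *head; on a hit, move it to the front
     * so frequently used entries sit near the start of the bucket. */
    static struct entry *lookup_mru(struct entry **head, uint64_t oblock)
    {
        struct entry **pp = head;

        for (struct entry *cur = *head; cur; pp = &cur->next, cur = cur->next) {
            if (cur->oblock == oblock) {
                *pp = cur->next;      /* unlink from its current spot */
                cur->next = *head;    /* relink at the bucket head */
                *head = cur;
                return cur;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct entry *head = &a;

        lookup_mru(&head, 3);
        printf("front of bucket is now oblock %llu\n",
               (unsigned long long)head->oblock);
        return 0;
    }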
dm-cache-policy-smq.c
    586  static struct entry *h_head(struct hash_table *ht, unsigned bucket)   in h_head() argument
    588  return to_entry(ht->es, ht->buckets[bucket]);   in h_head()
    596  static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e)   in __h_insert() argument
    598  e->hash_next = ht->buckets[bucket];   in __h_insert()
    599  ht->buckets[bucket] = to_index(ht->es, e);   in __h_insert()
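The smq policy stores its hash chains as small integer indices into a preallocated entry array rather than as pointers: h_head() converts ht->buckets[bucket] back into a struct entry with to_entry(), and __h_insert() writes to_index(ht->es, e) into the bucket. A compact sketch of index-linked chains; the sizes, the sentinel value and main() are illustrative:

    /* Sketch of hash chains stored as array indices instead of pointers,
     * in the spirit of smq's h_head()/__h_insert(); sizes are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define NR_ENTRIES 16
    #define NR_BUCKETS 8
    #define INDEX_NONE 0xffffu          /* sentinel meaning "end of chain" */

    struct entry {
        uint64_t oblock;
        uint16_t hash_next;             /* index of the next entry, or INDEX_NONE */
    };

    struct hash_table {
        struct entry es[NR_ENTRIES];    /* entry space: all entries preallocated */
        uint16_t buckets[NR_BUCKETS];   /* each bucket holds an entry index */
    };

    static struct entry *h_head(struct hash_table *ht, unsigned bucket)
    {
        uint16_t i = ht->buckets[bucket];

        return i == INDEX_NONE ? NULL : &ht->es[i];
    }

    static void __h_insert(struct hash_table *ht, unsigned bucket, uint16_t idx)
    {
        ht->es[idx].hash_next = ht->buckets[bucket];   /* chain onto old head */
        ht->buckets[bucket] = idx;
    }

    int main(void)
    {
        struct hash_table ht;

        for (unsigned i = 0; i < NR_BUCKETS; i++)
            ht.buckets[i] = INDEX_NONE;

        ht.es[5].oblock = 1234;
        __h_insert(&ht, 1234 % NR_BUCKETS, 5);
        printf("bucket %u head holds oblock %llu\n", 1234u % NR_BUCKETS,
               (unsigned long long)h_head(&ht, 1234 % NR_BUCKETS)->oblock);
        return 0;
    }

Indices are smaller than pointers and stay valid if the whole entry array is moved, which is presumably why smq lays its table out this way.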
dm-region-hash.c
    272  struct list_head *bucket = rh->buckets + rh_hash(rh, region);   in __rh_lookup() local
    274  list_for_each_entry(reg, bucket, hash_list)   in __rh_lookup()
dm-cache-policy-mq.c
    505  struct hlist_head *bucket = mq->table + h;   in hash_lookup() local
    508  hlist_for_each_entry(e, bucket, hlist)   in hash_lookup()
    511  hlist_add_head(&e->hlist, bucket);   in hash_lookup()
/drivers/infiniband/core/
fmr_pool.c
    119  struct hlist_head *bucket;   in ib_fmr_cache_lookup() local
    125  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);   in ib_fmr_cache_lookup()
    127  hlist_for_each_entry(fmr, bucket, cache_node)   in ib_fmr_cache_lookup()
/drivers/crypto/nx/
nx-842-pseries.c
    140  int bucket = fls(time);   in ibm_nx842_incr_hist() local
    142  if (bucket)   in ibm_nx842_incr_hist()
    143  bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);   in ibm_nx842_incr_hist()
    145  atomic64_inc(&times[bucket]);   in ibm_nx842_incr_hist()
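ibm_nx842_incr_hist() turns a measured time into a histogram slot by taking fls() (the index of the highest set bit, i.e. roughly log2) and clamping the result to the number of slots. A userspace sketch of the same binning, with fls() reimplemented portably and an assumed slot count:

    /* Sketch of log2 histogram binning as in ibm_nx842_incr_hist();
     * fls() is reimplemented portably and the slot count is an example. */
    #include <stdio.h>

    #define HIST_SLOTS 32

    static unsigned long times[HIST_SLOTS];

    static int fls_portable(unsigned int x)
    {
        int r = 0;

        while (x) {        /* position of the highest set bit, 1-based */
            x >>= 1;
            r++;
        }
        return r;          /* fls(0) == 0, matching the kernel helper */
    }

    static void incr_hist(unsigned int time)
    {
        int bucket = fls_portable(time);

        if (bucket)
            bucket = bucket - 1 < HIST_SLOTS - 1 ? bucket - 1 : HIST_SLOTS - 1;
        times[bucket]++;   /* slot 0 also catches time == 0 */
    }

    int main(void)
    {
        incr_hist(0);      /* -> slot 0 */
        incr_hist(7);      /* fls(7) == 3  -> slot 2 */
        incr_hist(1024);   /* fls(1024) == 11 -> slot 10 */
        printf("slot0=%lu slot2=%lu slot10=%lu\n", times[0], times[2], times[10]);
        return 0;
    }

Power-of-two binning gives wide coverage with very few counters, at the cost of coarse resolution for large values.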
/drivers/gpu/drm/radeon/
radeon_cs.c
    42   struct list_head bucket[RADEON_CS_NUM_BUCKETS];   member
    50   INIT_LIST_HEAD(&b->bucket[i]);   in radeon_cs_buckets_init()
    61   list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);   in radeon_cs_buckets_add()
    71   list_splice(&b->bucket[i], out_list);   in radeon_cs_buckets_get_list()
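radeon_cs.c (and the near-identical amdgpu_cs.c further down) performs a cheap bucket sort on command-stream buffer objects: radeon_cs_buckets_add() appends each item to bucket[min(priority, RADEON_CS_MAX_PRIORITY)], and radeon_cs_buckets_get_list() splices the buckets back out one after another. The sketch below shows the same idea with plain arrays standing in for the kernel list heads; the bucket count and items are illustrative:

    /* Sketch of priority bucketing as in radeon_cs_buckets_add()/_get_list():
     * append to bucket[min(prio, MAX)], then walk the buckets in order.
     * Plain arrays stand in for the kernel's list_head splicing. */
    #include <stdio.h>

    #define NUM_BUCKETS   5
    #define MAX_PRIORITY (NUM_BUCKETS - 1)
    #define MAX_ITEMS    16

    struct buckets {
        int item[NUM_BUCKETS][MAX_ITEMS];
        int count[NUM_BUCKETS];
    };

    static void buckets_add(struct buckets *b, int item, unsigned priority)
    {
        unsigned p = priority > MAX_PRIORITY ? MAX_PRIORITY : priority;

        b->item[p][b->count[p]++] = item;   /* keep insertion order per bucket */
    }

    static void buckets_get_list(const struct buckets *b)
    {
        /* Emit the buckets one after another, like the splice loop above. */
        for (int p = 0; p < NUM_BUCKETS; p++)
            for (int i = 0; i < b->count[p]; i++)
                printf("prio %d: item %d\n", p, b->item[p][i]);
    }

    int main(void)
    {
        struct buckets b = { .count = { 0 } };

        buckets_add(&b, 100, 3);
        buckets_add(&b, 101, 0);
        buckets_add(&b, 102, 9);   /* clamped into the last bucket */
        buckets_get_list(&b);
        return 0;
    }

Because there are only a handful of priority levels, this sorts the whole list in linear time with no comparisons.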
/drivers/atm/
horizon.c
    2280  unsigned int bucket;   in hrz_open()
    2320  bucket = mbs*(pcr-scr)/pcr;   in hrz_open()
    2321  if (bucket*pcr != mbs*(pcr-scr))   in hrz_open()
    2322  bucket += 1;   in hrz_open()
    2323  if (bucket > BUCKET_MAX_SIZE) {   in hrz_open()
    2325  bucket, BUCKET_MAX_SIZE);   in hrz_open()
    2326  bucket = BUCKET_MAX_SIZE;   in hrz_open()
    2329  vcc.tx_bucket_bits = bucket;   in hrz_open()
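In hrz_open() the transmit token-bucket depth is computed as the integer ceiling of mbs * (pcr - scr) / pcr and then capped at BUCKET_MAX_SIZE; the multiply-back comparison on lines 2320-2322 is simply a rounding-up check for truncated integer division. A worked example, with made-up ATM traffic parameters and an assumed cap value:

    /* Sketch of the bucket-depth computation in hrz_open():
     * bucket = ceil(mbs * (pcr - scr) / pcr), clamped to a maximum.
     * The traffic parameters and cap below are illustrative only. */
    #include <stdio.h>

    #define BUCKET_MAX_SIZE 31

    int main(void)
    {
        unsigned int pcr = 1000;   /* peak cell rate */
        unsigned int scr = 900;    /* sustained cell rate */
        unsigned int mbs = 75;     /* maximum burst size, in cells */

        unsigned int bucket = mbs * (pcr - scr) / pcr;
        if (bucket * pcr != mbs * (pcr - scr))   /* division truncated: round up */
            bucket += 1;
        if (bucket > BUCKET_MAX_SIZE)
            bucket = BUCKET_MAX_SIZE;

        /* 75 * 100 / 1000 = 7.5, so the bucket must hold 8 cells here. */
        printf("token bucket depth = %u\n", bucket);
        return 0;
    }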
/drivers/net/ethernet/mellanox/mlx4/
en_netdev.c
    643   struct hlist_head *bucket;   in mlx4_en_replace_mac() local
    649   bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];   in mlx4_en_replace_mac()
    650   hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {   in mlx4_en_replace_mac()
    1061  struct hlist_head *bucket;   in mlx4_en_do_uc_filter() local
    1072  bucket = &priv->mac_hash[i];   in mlx4_en_do_uc_filter()
    1073  hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {   in mlx4_en_do_uc_filter()
    1116  bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];   in mlx4_en_do_uc_filter()
    1117  hlist_for_each_entry(entry, bucket, hlist) {   in mlx4_en_do_uc_filter()
    1157  bucket = &priv->mac_hash[mac_hash];   in mlx4_en_do_uc_filter()
    1158  hlist_add_head_rcu(&entry->hlist, bucket);   in mlx4_en_do_uc_filter()
    [all …]
en_rx.c
    839  struct hlist_head *bucket;   in mlx4_en_process_rx_cq() local
    844  bucket = &priv->mac_hash[mac_hash];   in mlx4_en_process_rx_cq()
    846  hlist_for_each_entry_rcu(entry, bucket, hlist) {   in mlx4_en_process_rx_cq()
/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c
    41   struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];   member
    49   INIT_LIST_HEAD(&b->bucket[i]);   in amdgpu_cs_buckets_init()
    60   list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);   in amdgpu_cs_buckets_add()
    70   list_splice(&b->bucket[i], out_list);   in amdgpu_cs_buckets_get_list()
/drivers/media/v4l2-core/
v4l2-ctrls.c
    1792  int bucket;   in find_ref() local
    1799  bucket = id % hdl->nr_of_buckets;   in find_ref()
    1806  ref = hdl->buckets ? hdl->buckets[bucket] : NULL;   in find_ref()
    1846  int bucket = id % hdl->nr_of_buckets;   /* which bucket to use */   in handler_new_ref() local
    1901  new_ref->next = hdl->buckets[bucket];   in handler_new_ref()
    1902  hdl->buckets[bucket] = new_ref;   in handler_new_ref()
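find_ref() hashes a control ID with a plain modulo (id % hdl->nr_of_buckets) and copes with a bucket array that has not been allocated yet (hdl->buckets ? ... : NULL), while handler_new_ref() prepends the new reference to its bucket. A simplified stand-in for that lookup/insert pair; the structures, the bucket count and the lazy allocation in new_ref() are illustrative, not the v4l2 API:

    /* Sketch of modulo-hashed control references as in find_ref()/handler_new_ref();
     * simplified userspace stand-in. */
    #include <stdio.h>
    #include <stdlib.h>

    struct ctrl_ref {
        unsigned int id;
        struct ctrl_ref *next;
    };

    struct ctrl_handler {
        struct ctrl_ref **buckets;   /* may still be NULL before first use */
        unsigned int nr_of_buckets;
    };

    static struct ctrl_ref *find_ref(struct ctrl_handler *hdl, unsigned int id)
    {
        unsigned int bucket = id % hdl->nr_of_buckets;
        struct ctrl_ref *ref = hdl->buckets ? hdl->buckets[bucket] : NULL;

        for (; ref; ref = ref->next)
            if (ref->id == id)
                return ref;
        return NULL;
    }

    static void new_ref(struct ctrl_handler *hdl, unsigned int id)
    {
        unsigned int bucket = id % hdl->nr_of_buckets;   /* which bucket to use */
        struct ctrl_ref *ref = calloc(1, sizeof(*ref));

        if (!hdl->buckets)
            hdl->buckets = calloc(hdl->nr_of_buckets, sizeof(*hdl->buckets));
        ref->id = id;
        ref->next = hdl->buckets[bucket];   /* prepend to the chain */
        hdl->buckets[bucket] = ref;
    }

    int main(void)
    {
        struct ctrl_handler hdl = { .buckets = NULL, .nr_of_buckets = 61 };

        new_ref(&hdl, 0x00980900);   /* e.g. a brightness control ID */
        printf("found: %s\n", find_ref(&hdl, 0x00980900) ? "yes" : "no");
        return 0;
    }

A plain modulo with a bucket count that is not a power of two works fine here because control lookups are not on a hot path.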