Searched refs:bucket (Results 1 – 25 of 39) sorted by relevance

/drivers/interconnect/qcom/
bcm-voter.c
64 size_t i, bucket; in bcm_aggregate() local
69 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate()
72 temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, in bcm_aggregate()
74 agg_avg[bucket] = max(agg_avg[bucket], temp); in bcm_aggregate()
76 temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width, in bcm_aggregate()
78 agg_peak[bucket] = max(agg_peak[bucket], temp); in bcm_aggregate()
81 temp = agg_avg[bucket] * bcm->vote_scale; in bcm_aggregate()
82 bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit); in bcm_aggregate()
84 temp = agg_peak[bucket] * bcm->vote_scale; in bcm_aggregate()
85 bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit); in bcm_aggregate()
[all …]
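
bcm_aggregate() walks every bucket, keeps the per-bucket maximum of the nodes' scaled average and peak bandwidth requests, then converts the winners into hardware vote units. A minimal sketch of the same max-and-scale pattern, with a hypothetical bucket count and the driver's vote_scale step omitted for brevity (div_round_up() stands in for the driver's bcm_div()):

    #define NUM_BUCKETS 3   /* stand-in for QCOM_ICC_NUM_BUCKETS */

    struct node_votes {
            unsigned long long sum_avg[NUM_BUCKETS];
            unsigned long long max_peak[NUM_BUCKETS];
    };

    /* round-up division, in the spirit of the driver's bcm_div() */
    static unsigned long long div_round_up(unsigned long long num,
                                           unsigned long long den)
    {
            return (num + den - 1) / den;
    }

    static void aggregate(const struct node_votes *nodes, int n_nodes,
                          unsigned long long width, unsigned long long unit,
                          unsigned long long *vote_x, unsigned long long *vote_y)
    {
            unsigned long long agg_avg, agg_peak, temp;
            int i, bucket;

            for (bucket = 0; bucket < NUM_BUCKETS; bucket++) {
                    agg_avg = 0;
                    agg_peak = 0;
                    for (i = 0; i < n_nodes; i++) {
                            /* scale each node's request by the BCM width */
                            temp = nodes[i].sum_avg[bucket] * width;
                            if (temp > agg_avg)
                                    agg_avg = temp;
                            temp = nodes[i].max_peak[bucket] * width;
                            if (temp > agg_peak)
                                    agg_peak = temp;
                    }
                    /* convert the winning requests into vote units */
                    vote_x[bucket] = div_round_up(agg_avg, unit);
                    vote_y[bucket] = div_round_up(agg_peak, unit);
            }
    }
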
/drivers/infiniband/sw/rdmavt/
trace_qp.h
18 TP_PROTO(struct rvt_qp *qp, u32 bucket),
19 TP_ARGS(qp, bucket),
23 __field(u32, bucket)
28 __entry->bucket = bucket;
34 __entry->bucket
39 TP_PROTO(struct rvt_qp *qp, u32 bucket),
40 TP_ARGS(qp, bucket));
43 TP_PROTO(struct rvt_qp *qp, u32 bucket),
44 TP_ARGS(qp, bucket));
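
trace_qp.h follows the kernel's event-class pattern: one DECLARE_EVENT_CLASS captures the (qp, bucket) pair, and each DEFINE_EVENT stamps out a named event with the identical TP_PROTO/TP_ARGS. A minimal sketch of that pattern, omitting the TRACE_SYSTEM boilerplate a real trace header needs; the qpn field and event names here are illustrative, not rdmavt's actual ones:

    #include <linux/tracepoint.h>

    DECLARE_EVENT_CLASS(qp_bucket_template,
            TP_PROTO(struct rvt_qp *qp, u32 bucket),
            TP_ARGS(qp, bucket),
            TP_STRUCT__entry(
                    __field(u32, qpn)
                    __field(u32, bucket)
            ),
            TP_fast_assign(
                    __entry->qpn = qp->ibqp.qp_num;
                    __entry->bucket = bucket;
            ),
            TP_printk("qpn 0x%x bucket %u", __entry->qpn, __entry->bucket)
    );

    /* each DEFINE_EVENT reuses the class under a new event name */
    DEFINE_EVENT(qp_bucket_template, qp_bucket_insert,
            TP_PROTO(struct rvt_qp *qp, u32 bucket),
            TP_ARGS(qp, bucket));
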
/drivers/cpuidle/governors/
menu.c
114 unsigned int bucket; member
122 int bucket = 0; in which_bucket() local
131 bucket = BUCKETS/2; in which_bucket()
134 return bucket; in which_bucket()
136 return bucket + 1; in which_bucket()
138 return bucket + 2; in which_bucket()
140 return bucket + 3; in which_bucket()
142 return bucket + 4; in which_bucket()
143 return bucket + 5; in which_bucket()
291 data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); in menu_select()
[all …]
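
which_bucket() maps the predicted sleep length onto a small histogram index and, when I/O waiters are present, shifts the index up by BUCKETS/2 so those wakeups are tracked in separate buckets. A minimal sketch of the same threshold ladder; the cutoffs and BUCKETS value here are made up, not menu.c's:

    #define BUCKETS 12   /* illustrative; menu.c defines its own */

    static int which_bucket(unsigned long long duration_ns,
                            unsigned int nr_iowaiters)
    {
            int bucket = 0;

            /* I/O waiters get the upper half of the histogram */
            if (nr_iowaiters)
                    bucket = BUCKETS / 2;

            if (duration_ns < 10ULL * 1000)         /* < 10 us */
                    return bucket;
            if (duration_ns < 100ULL * 1000)        /* < 100 us */
                    return bucket + 1;
            if (duration_ns < 1000ULL * 1000)       /* < 1 ms */
                    return bucket + 2;
            if (duration_ns < 10000ULL * 1000)      /* < 10 ms */
                    return bucket + 3;
            if (duration_ns < 100000ULL * 1000)     /* < 100 ms */
                    return bucket + 4;
            return bucket + 5;
    }
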
/drivers/md/bcache/
alloc.c
76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) in bch_inc_gen()
89 struct bucket *b; in bch_rescale_priorities()
125 static inline bool can_inc_bucket_gen(struct bucket *b) in can_inc_bucket_gen()
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket()
140 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket()
153 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket()
181 struct bucket *b; in invalidate_buckets_lru()
218 struct bucket *b; in invalidate_buckets_fifo()
241 struct bucket *b; in invalidate_buckets_random()
302 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
[all …]
bcache.h
197 struct bucket { struct
210 BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
216 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
217 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
451 struct bucket *buckets;
453 DECLARE_HEAP(struct bucket *, heap);
814 static inline struct bucket *PTR_BUCKET(struct cache_set *c, in PTR_BUCKET()
910 static inline uint8_t bucket_gc_gen(struct bucket *b) in bucket_gc_gen()
974 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
977 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
[all …]
extents.c
54 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid() local
58 bucket < ca->sb.first_bucket || in __ptr_invalid()
59 bucket >= ca->sb.nbuckets) in __ptr_invalid()
75 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status() local
80 if (bucket < ca->sb.first_bucket) in bch_ptr_status()
82 if (bucket >= ca->sb.nbuckets) in bch_ptr_status()
177 struct bucket *g; in btree_ptr_bad_expensive()
510 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
movinggc.c
185 static bool bucket_cmp(struct bucket *l, struct bucket *r) in bucket_cmp()
192 struct bucket *b; in bucket_heap_top()
200 struct bucket *b; in bch_moving_gc()
super.c
593 static void prio_io(struct cache *ca, uint64_t bucket, int op, in prio_io() argument
601 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
617 struct bucket *b; in bch_prio_write()
647 long bucket; in bch_prio_write() local
663 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); in bch_prio_write()
664 BUG_ON(bucket == -1); in bch_prio_write()
667 prio_io(ca, bucket, REQ_OP_WRITE, 0); in bch_prio_write()
670 ca->prio_buckets[i] = bucket; in bch_prio_write()
671 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
695 static int prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
[all …]
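
Throughout bcache a bucket number is only trusted after a bounds check against the superblock, as in __ptr_invalid() and bch_ptr_status() above: anything below sb.first_bucket or at or beyond sb.nbuckets marks a bad pointer. A minimal sketch of that range check with a pared-down stand-in struct:

    struct cache_sb_lite {          /* pared-down stand-in for struct cache_sb */
            size_t first_bucket;    /* first bucket usable for data */
            size_t nbuckets;        /* total buckets on the device */
    };

    static int bucket_is_valid(const struct cache_sb_lite *sb, size_t bucket)
    {
            /* buckets below first_bucket hold superblock/metadata areas */
            return bucket >= sb->first_bucket && bucket < sb->nbuckets;
    }
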
/drivers/md/
dm-clone-target.c
564 #define bucket_lock_irqsave(bucket, flags) \ argument
565 spin_lock_irqsave(&(bucket)->lock, flags)
567 #define bucket_unlock_irqrestore(bucket, flags) \ argument
568 spin_unlock_irqrestore(&(bucket)->lock, flags)
570 #define bucket_lock_irq(bucket) \ argument
571 spin_lock_irq(&(bucket)->lock)
573 #define bucket_unlock_irq(bucket) \ argument
574 spin_unlock_irq(&(bucket)->lock)
579 struct hash_table_bucket *bucket; in hash_table_init() local
588 bucket = clone->ht + i; in hash_table_init()
[all …]
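
dm-clone gives every hash-table bucket its own spinlock, so lookups and insertions on different chains never contend, and wraps the lock calls in bucket_* macros that take the bucket itself. A minimal sketch of the same fine-grained layout; the bucket count and init helper are illustrative:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    #define NR_BUCKETS 64           /* illustrative size */

    struct ht_bucket {
            spinlock_t lock;        /* protects this chain only */
            struct hlist_head head;
    };

    static struct ht_bucket table[NR_BUCKETS];

    static void ht_init(void)
    {
            int i;

            for (i = 0; i < NR_BUCKETS; i++) {
                    spin_lock_init(&table[i].lock);
                    INIT_HLIST_HEAD(&table[i].head);
            }
    }

    #define bucket_lock_irqsave(b, flags) \
            spin_lock_irqsave(&(b)->lock, flags)
    #define bucket_unlock_irqrestore(b, flags) \
            spin_unlock_irqrestore(&(b)->lock, flags)
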
dm-ps-historical-service-time.c
150 int bucket = clamp(delta >> HST_BUCKET_SHIFT, 0ULL, in hst_weight() local
153 return s->weights[bucket]; in hst_weight()
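
hst_weight() turns a latency delta into a table index with a single shift plus clamp, so outliers saturate in the last slot instead of indexing past the array. A minimal sketch with illustrative shift and table size:

    #define BUCKET_SHIFT 10         /* illustrative; divides delta by 1024 */
    #define WEIGHT_COUNT 64         /* illustrative table size */

    static unsigned int weights[WEIGHT_COUNT];

    static unsigned int weight_for_delta(unsigned long long delta)
    {
            unsigned long long bucket = delta >> BUCKET_SHIFT;

            /* saturate: anything past the table maps to the last bucket */
            if (bucket > WEIGHT_COUNT - 1)
                    bucket = WEIGHT_COUNT - 1;
            return weights[bucket];
    }
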
dm-region-hash.c
274 struct list_head *bucket = rh->buckets + rh_hash(rh, region); in __rh_lookup() local
276 list_for_each_entry(reg, bucket, hash_list) in __rh_lookup()
dm-cache-policy-smq.c
606 static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket) in h_head() argument
608 return to_entry(ht->es, ht->buckets[bucket]); in h_head()
616 static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e) in __h_insert() argument
618 e->hash_next = ht->buckets[bucket]; in __h_insert()
619 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
/drivers/md/persistent-data/
dm-transaction-manager.c
106 unsigned bucket = dm_hash_block(b, DM_HASH_MASK); in is_shadow() local
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
126 unsigned bucket; in insert_shadow() local
132 bucket = dm_hash_block(b, DM_HASH_MASK); in insert_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
143 struct hlist_head *bucket; in wipe_shadow_table() local
148 bucket = tm->buckets + i; in wipe_shadow_table()
149 hlist_for_each_entry_safe(si, tmp, bucket, hlist) in wipe_shadow_table()
152 INIT_HLIST_HEAD(bucket); in wipe_shadow_table()
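
The shadow table above is a textbook hlist hash: is_shadow() walks one chain, insert_shadow() pushes at the chain head, and wipe_shadow_table() drains each bucket with the _safe iterator before reinitializing it. A minimal sketch of those three operations; the hash (low bits of the block number), entry type, and table size are stand-ins:

    #include <linux/list.h>
    #include <linux/slab.h>

    #define HASH_SIZE 256
    #define HASH_MASK (HASH_SIZE - 1)

    struct shadow {
            struct hlist_node hlist;
            unsigned long long block;
    };

    static struct hlist_head buckets[HASH_SIZE];

    static int is_shadow(unsigned long long b)
    {
            struct shadow *si;

            hlist_for_each_entry(si, &buckets[b & HASH_MASK], hlist)
                    if (si->block == b)
                            return 1;
            return 0;
    }

    static void insert_shadow(struct shadow *si)
    {
            hlist_add_head(&si->hlist, &buckets[si->block & HASH_MASK]);
    }

    static void wipe_shadow_table(void)
    {
            struct shadow *si;
            struct hlist_node *tmp;
            int i;

            for (i = 0; i < HASH_SIZE; i++) {
                    hlist_for_each_entry_safe(si, tmp, &buckets[i], hlist)
                            kfree(si);
                    INIT_HLIST_HEAD(&buckets[i]);
            }
    }
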
/drivers/net/ethernet/freescale/fman/
fman_dtsec.c
533 static void set_bucket(struct dtsec_regs __iomem *regs, int bucket, in set_bucket() argument
536 int reg_idx = (bucket >> 5) & 0xf; in set_bucket()
537 int bit_idx = bucket & 0x1f; in set_bucket()
1056 s32 bucket; in dtsec_add_hash_mac_address() local
1087 bucket = (s32)((crc >> 23) & 0x1ff); in dtsec_add_hash_mac_address()
1089 bucket = (s32)((crc >> 24) & 0xff); in dtsec_add_hash_mac_address()
1094 bucket += 0x100; in dtsec_add_hash_mac_address()
1097 set_bucket(dtsec->regs, bucket, true); in dtsec_add_hash_mac_address()
1109 &dtsec->multicast_addr_hash->lsts[bucket]); in dtsec_add_hash_mac_address()
1112 &dtsec->unicast_addr_hash->lsts[bucket]); in dtsec_add_hash_mac_address()
[all …]
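
set_bucket() splits a hash bucket number into a register index and a bit index: the low 5 bits pick one of 32 bits, the next 4 bits pick which 32-bit group-address register holds it. A minimal sketch of the split; the register bank is a plain array here, and the hardware's actual bit ordering within a register may differ:

    #include <stdint.h>

    static uint32_t hash_regs[16];  /* stand-in for the GADDR register bank */

    static void set_bucket(int bucket, int enable)
    {
            int reg_idx = (bucket >> 5) & 0xf;  /* which 32-bit register */
            int bit_idx = bucket & 0x1f;        /* which bit inside it */
            uint32_t mask = 1u << bit_idx;

            if (enable)
                    hash_regs[reg_idx] |= mask;
            else
                    hash_regs[reg_idx] &= ~mask;
    }
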
/drivers/net/wireguard/
ratelimiter.c
92 struct hlist_head *bucket; in wg_ratelimiter_allow() local
97 bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & in wg_ratelimiter_allow()
104 bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & in wg_ratelimiter_allow()
111 hlist_for_each_entry_rcu(entry, bucket, hash) { in wg_ratelimiter_allow()
149 hlist_add_head_rcu(&entry->hash, bucket); in wg_ratelimiter_allow()
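
wg_ratelimiter_allow() chooses a bucket by running keyed hsiphash over the namespace pointer word and the source address, then masking down to the table size; the secret key keeps attackers from forcing collisions. A minimal sketch of the selection, with an illustrative table size (a power of two, so the mask works):

    #include <linux/siphash.h>
    #include <linux/list.h>

    #define TABLE_SIZE 8192         /* illustrative; must be a power of two */

    static struct hlist_head table_v4[TABLE_SIZE];
    static hsiphash_key_t key;      /* filled with random bytes at init */

    static struct hlist_head *bucket_for(u32 net_word, u32 ip)
    {
            /* keyed hash: collisions are unpredictable to outsiders */
            return &table_v4[hsiphash_2u32(net_word, ip, &key) &
                             (TABLE_SIZE - 1)];
    }
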
/drivers/misc/vmw_vmci/
vmci_doorbell.c
120 u32 bucket = VMCI_DOORBELL_HASH(idx); in dbell_index_table_find() local
123 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], in dbell_index_table_find()
139 u32 bucket; in dbell_index_table_add() local
187 bucket = VMCI_DOORBELL_HASH(entry->idx); in dbell_index_table_add()
188 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); in dbell_index_table_add()
355 u32 bucket = VMCI_DOORBELL_HASH(notify_idx); in dbell_fire_entries() local
360 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) { in dbell_fire_entries()
/drivers/gpu/drm/amd/amdgpu/
amdgpu_bo_list.c
192 struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; in amdgpu_bo_list_get_list() local
197 INIT_LIST_HEAD(&bucket[i]); in amdgpu_bo_list_get_list()
209 list_add_tail(&e->tv.head, &bucket[priority]); in amdgpu_bo_list_get_list()
216 list_splice(&bucket[i], validated); in amdgpu_bo_list_get_list()
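
amdgpu_bo_list_get_list() is a small bucket sort: one list head per priority, every entry moved onto its priority's list, then the buckets spliced back in order, yielding a sorted list with no comparisons (radeon_cs.c further down uses the same trick). A minimal sketch with an illustrative entry type and priority count:

    #include <linux/list.h>

    #define NUM_PRIO 4              /* illustrative priority count */

    struct item {
            struct list_head head;
            unsigned int priority;  /* 0 .. NUM_PRIO-1 */
    };

    static void bucket_sort(struct list_head *in, struct list_head *out)
    {
            struct list_head bucket[NUM_PRIO];
            struct item *e, *tmp;
            int i;

            for (i = 0; i < NUM_PRIO; i++)
                    INIT_LIST_HEAD(&bucket[i]);

            /* distribute: O(n), stable within a priority */
            list_for_each_entry_safe(e, tmp, in, head)
                    list_move_tail(&e->head, &bucket[e->priority]);

            /* gather: splice the buckets back in priority order */
            for (i = 0; i < NUM_PRIO; i++)
                    list_splice_tail(&bucket[i], out);
    }
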
/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
437 memcpy(&gscan_cfg->bucket[0], buckets, in brcmf_pno_config_sched_scans()
552 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket) in brcmf_pno_find_reqid_by_bucket() argument
558 if (bucket < pi->n_reqs) in brcmf_pno_find_reqid_by_bucket()
559 reqid = pi->reqs[bucket]->reqid; in brcmf_pno_find_reqid_by_bucket()
pno.h
61 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
/drivers/crypto/nx/
nx-common-pseries.c
152 int bucket = fls(time); in ibm_nx842_incr_hist() local
154 if (bucket) in ibm_nx842_incr_hist()
155 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); in ibm_nx842_incr_hist()
157 atomic64_inc(&times[bucket]); in ibm_nx842_incr_hist()
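
ibm_nx842_incr_hist() buckets a measurement by its highest set bit: fls() returns the 1-based position of the top bit, so each slot covers a power-of-two range and the last slot absorbs everything larger. A minimal sketch with an illustrative slot count:

    #include <linux/bitops.h>
    #include <linux/atomic.h>
    #include <linux/minmax.h>

    #define HIST_SLOTS 16           /* illustrative */

    static atomic64_t times[HIST_SLOTS];

    static void incr_hist(unsigned int time)
    {
            int bucket = fls(time); /* 0 if time == 0, else 1-based top bit */

            if (bucket)
                    bucket = min(HIST_SLOTS - 1, bucket - 1);
            atomic64_inc(&times[bucket]);
    }
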
/drivers/gpu/drm/radeon/
radeon_cs.c
48 struct list_head bucket[RADEON_CS_NUM_BUCKETS]; member
56 INIT_LIST_HEAD(&b->bucket[i]); in radeon_cs_buckets_init()
67 list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); in radeon_cs_buckets_add()
77 list_splice(&b->bucket[i], out_list); in radeon_cs_buckets_get_list()
/drivers/atm/
horizon.c
2266 unsigned int bucket; in hrz_open()
2306 bucket = mbs*(pcr-scr)/pcr; in hrz_open()
2307 if (bucket*pcr != mbs*(pcr-scr)) in hrz_open()
2308 bucket += 1; in hrz_open()
2309 if (bucket > BUCKET_MAX_SIZE) { in hrz_open()
2311 bucket, BUCKET_MAX_SIZE); in hrz_open()
2312 bucket = BUCKET_MAX_SIZE; in hrz_open()
2315 vcc.tx_bucket_bits = bucket; in hrz_open()
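
hrz_open() sizes the leaky bucket for a VBR connection as mbs*(pcr-scr)/pcr, rounds up by testing whether the integer division truncated, and clamps to the hardware maximum. A minimal sketch of that computation; the BUCKET_MAX_SIZE value here is illustrative:

    #define BUCKET_MAX_SIZE 255     /* illustrative hardware limit */

    static unsigned int bucket_size(unsigned int mbs, unsigned int pcr,
                                    unsigned int scr)
    {
            unsigned int bucket = mbs * (pcr - scr) / pcr;

            /* round up if the division truncated */
            if (bucket * pcr != mbs * (pcr - scr))
                    bucket += 1;
            if (bucket > BUCKET_MAX_SIZE)
                    bucket = BUCKET_MAX_SIZE;
            return bucket;
    }
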
/drivers/net/ethernet/mellanox/mlx4/
en_netdev.c
707 struct hlist_head *bucket; in mlx4_en_replace_mac() local
713 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_replace_mac()
714 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { in mlx4_en_replace_mac()
1144 struct hlist_head *bucket; in mlx4_en_do_uc_filter() local
1155 bucket = &priv->mac_hash[i]; in mlx4_en_do_uc_filter()
1156 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { in mlx4_en_do_uc_filter()
1199 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_do_uc_filter()
1200 hlist_for_each_entry(entry, bucket, hlist) { in mlx4_en_do_uc_filter()
1240 bucket = &priv->mac_hash[mac_hash]; in mlx4_en_do_uc_filter()
1241 hlist_add_head_rcu(&entry->hlist, bucket); in mlx4_en_do_uc_filter()
[all …]
/drivers/target/iscsi/cxgbit/
cxgbit_cm.c
85 int bucket = cxgbit_np_hashfn(cnp); in cxgbit_np_hash_add() local
90 p->next = cdev->np_hash_tab[bucket]; in cxgbit_np_hash_add()
91 cdev->np_hash_tab[bucket] = p; in cxgbit_np_hash_add()
101 int stid = -1, bucket = cxgbit_np_hashfn(cnp); in cxgbit_np_hash_find() local
105 for (p = cdev->np_hash_tab[bucket]; p; p = p->next) { in cxgbit_np_hash_find()
118 int stid = -1, bucket = cxgbit_np_hashfn(cnp); in cxgbit_np_hash_del() local
119 struct np_info *p, **prev = &cdev->np_hash_tab[bucket]; in cxgbit_np_hash_del()
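
cxgbit_np_hash_del() uses the pointer-to-pointer idiom on a singly linked chain: prev starts at the address of the bucket head, so the match is unlinked with one store and the first element needs no special case. A minimal sketch with an illustrative node type and table:

    struct np_info {
            struct np_info *next;
            void *cnp;              /* lookup key */
    };

    static struct np_info *hash_tab[64];

    static int hash_del(int bucket, void *cnp)
    {
            struct np_info *p, **prev = &hash_tab[bucket];

            for (p = *prev; p; prev = &p->next, p = *prev) {
                    if (p->cnp == cnp) {
                            *prev = p->next;  /* unlink, head or middle alike */
                            /* caller frees p */
                            return 0;
                    }
            }
            return -1;              /* not found */
    }
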
/drivers/media/v4l2-core/
v4l2-ctrls-core.c
1032 int bucket; in find_ref() local
1039 bucket = id % hdl->nr_of_buckets; in find_ref()
1046 ref = hdl->buckets ? hdl->buckets[bucket] : NULL; in find_ref()
1087 int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ in handler_new_ref() local
1143 new_ref->next = hdl->buckets[bucket]; in handler_new_ref()
1144 hdl->buckets[bucket] = new_ref; in handler_new_ref()
