/drivers/staging/lustre/lustre/obdclass/

D  lustre_handles.c
     62  struct handle_bucket *bucket;  in class_handle_hash() local
     89  bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];  in class_handle_hash()
     90  spin_lock(&bucket->lock);  in class_handle_hash()
     91  list_add_rcu(&h->h_link, &bucket->head);  in class_handle_hash()
     93  spin_unlock(&bucket->lock);  in class_handle_hash()
    123  struct handle_bucket *bucket;  in class_handle_unhash() local
    125  bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);  in class_handle_unhash()
    127  spin_lock(&bucket->lock);  in class_handle_unhash()
    129  spin_unlock(&bucket->lock);  in class_handle_unhash()
    135  struct handle_bucket *bucket;  in class_handle2object() local
    [all …]
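
The call sites above all share one shape: hash a cookie into a fixed-size bucket array and take that bucket's own lock around the list manipulation. A minimal userspace sketch of the pattern, assuming POSIX threads; the names and sizes are illustrative stand-ins, not the kernel's API (plain pointers and a pthread mutex replace struct list_head, RCU, and spinlocks):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    #define HANDLE_HASH_SIZE 128                     /* assumed power of two */
    #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

    struct handle {
        uint64_t cookie;
        struct handle *next;
    };

    struct handle_bucket {
        pthread_mutex_t lock;                        /* one lock per bucket */
        struct handle *head;
    };

    static struct handle_bucket handle_hash[HANDLE_HASH_SIZE];

    static void handle_hash_init(void)
    {
        for (size_t i = 0; i < HANDLE_HASH_SIZE; i++)
            pthread_mutex_init(&handle_hash[i].lock, NULL);
    }

    static void handle_hash_insert(struct handle *h)
    {
        struct handle_bucket *bucket =
            &handle_hash[h->cookie & HANDLE_HASH_MASK];

        pthread_mutex_lock(&bucket->lock);
        h->next = bucket->head;                      /* head insertion, cf. list_add_rcu() */
        bucket->head = h;
        pthread_mutex_unlock(&bucket->lock);
    }

    static struct handle *handle_hash_lookup(uint64_t cookie)
    {
        struct handle_bucket *bucket = &handle_hash[cookie & HANDLE_HASH_MASK];
        struct handle *h;

        pthread_mutex_lock(&bucket->lock);
        for (h = bucket->head; h; h = h->next)
            if (h->cookie == cookie)
                break;
        pthread_mutex_unlock(&bucket->lock);
        return h;
    }

Per-bucket locks let operations on unrelated cookies proceed in parallel; the kernel version goes further and uses RCU (the list_add_rcu() above) so readers can walk a chain without taking the bucket lock at all.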

/drivers/infiniband/sw/rdmavt/

D  trace_qp.h
     60  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     61  TP_ARGS(qp, bucket),
     65  __field(u32, bucket)
     70  __entry->bucket = bucket;
     76  __entry->bucket
     81  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     82  TP_ARGS(qp, bucket));
     85  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     86  TP_ARGS(qp, bucket));

/drivers/infiniband/core/

D  uverbs_ioctl_merge.c
     71  num_objects_fld, objects_fld, bucket,\  argument
     79  bucket, min_id)
     81  #define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \  argument
     83  num_objects, objects, bucket, min_id)
     85  #define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id)\  argument
     87  num_methods, methods, bucket, min_id)
     89  #define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id)\  argument
     91  num_attrs, attrs, bucket, min_id)
    110  u16 bucket,  in get_elements_above_id() argument
    123  if (GET_NS_ID(id) != bucket)  in get_elements_above_id()
    [all …]

D  fmr_pool.c
    120  struct hlist_head *bucket;  in ib_fmr_cache_lookup() local
    126  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);  in ib_fmr_cache_lookup()
    128  hlist_for_each_entry(fmr, bucket, cache_node)  in ib_fmr_cache_lookup()

/drivers/cpuidle/governors/

D  menu.c
    130  unsigned int bucket;  member
    143  int bucket = 0;  in which_bucket() local
    152  bucket = BUCKETS/2;  in which_bucket()
    155  return bucket;  in which_bucket()
    157  return bucket + 1;  in which_bucket()
    159  return bucket + 2;  in which_bucket()
    161  return bucket + 3;  in which_bucket()
    163  return bucket + 4;  in which_bucket()
    164  return bucket + 5;  in which_bucket()
    320  data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);  in menu_select()
    [all …]
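
The menu.c fragments outline a classifier that files the predicted sleep length into logarithmic buckets, with the upper half of the histogram reserved for periods with outstanding I/O waiters. A standalone reconstruction of that shape follows; BUCKETS and the decade thresholds are assumptions for illustration, since only the +1..+5 ladder and the BUCKETS/2 offset are visible in the listing:

    /* Sketch of the bucket classifier suggested by the fragments above.
     * Thresholds and BUCKETS are assumed, not taken from the source. */
    #define BUCKETS 12

    static int which_bucket(unsigned int duration_us, unsigned long nr_iowaiters)
    {
        int bucket = 0;

        /* I/O-bound and CPU-bound sleeps get separate histogram halves. */
        if (nr_iowaiters)
            bucket = BUCKETS / 2;

        if (duration_us < 10)
            return bucket;
        if (duration_us < 100)
            return bucket + 1;
        if (duration_us < 1000)
            return bucket + 2;
        if (duration_us < 10000)
            return bucket + 3;
        if (duration_us < 100000)
            return bucket + 4;
        return bucket + 5;
    }

menu_select() then uses the chosen bucket to index per-bucket correction factors; that part is not visible in the fragments and is omitted here.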

/drivers/md/bcache/

D  alloc.c
     76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()
     89  struct bucket *b;  in bch_rescale_priorities()
    126  static inline bool can_inc_bucket_gen(struct bucket *b)  in can_inc_bucket_gen()
    131  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)  in bch_can_invalidate_bucket()
    141  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  in __bch_invalidate_one_bucket()
    154  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  in bch_invalidate_one_bucket()
    182  struct bucket *b;  in invalidate_buckets_lru()
    219  struct bucket *b;  in invalidate_buckets_fifo()
    242  struct bucket *b;  in invalidate_buckets_random()
    301  static int bch_allocator_push(struct cache *ca, long bucket)  in bch_allocator_push() argument
    [all …]

D  bcache.h
    195  struct bucket {  struct
    208  BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
    214  BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
    215  BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
    421  struct bucket *buckets;
    423  DECLARE_HEAP(struct bucket *, heap);
    731  static inline struct bucket *PTR_BUCKET(struct cache_set *c,  in PTR_BUCKET()
    829  static inline uint8_t bucket_gc_gen(struct bucket *b)  in bucket_gc_gen()
    865  uint8_t bch_inc_gen(struct cache *, struct bucket *);
    868  bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
    [all …]

D  extents.c
     54  size_t bucket = PTR_BUCKET_NR(c, k, i);  in __ptr_invalid() local
     58  bucket < ca->sb.first_bucket ||  in __ptr_invalid()
     59  bucket >= ca->sb.nbuckets)  in __ptr_invalid()
     75  size_t bucket = PTR_BUCKET_NR(c, k, i);  in bch_ptr_status() local
     80  if (bucket < ca->sb.first_bucket)  in bch_ptr_status()
     82  if (bucket >= ca->sb.nbuckets)  in bch_ptr_status()
    176  struct bucket *g;  in btree_ptr_bad_expensive()
    507  struct bucket *g = PTR_BUCKET(b->c, k, ptr);  in bch_extent_bad_expensive()
    537  struct bucket *g;  in bch_extent_bad()

D  movinggc.c
    184  static bool bucket_cmp(struct bucket *l, struct bucket *r)  in bucket_cmp()
    191  struct bucket *b;  in bucket_heap_top()
    198  struct bucket *b;  in bch_moving_gc()

D  super.c
    502  static void prio_io(struct cache *ca, uint64_t bucket, int op,  in prio_io() argument
    510  bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;  in prio_io()
    526  struct bucket *b;  in bch_prio_write()
    542  long bucket;  in bch_prio_write() local
    558  bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);  in bch_prio_write()
    559  BUG_ON(bucket == -1);  in bch_prio_write()
    562  prio_io(ca, bucket, REQ_OP_WRITE, 0);  in bch_prio_write()
    565  ca->prio_buckets[i] = bucket;  in bch_prio_write()
    566  atomic_dec_bug(&ca->buckets[bucket].pin);  in bch_prio_write()
    589  static void prio_read(struct cache *ca, uint64_t bucket)  in prio_read() argument
    [all …]

/drivers/md/persistent-data/

D  dm-transaction-manager.c
    106  unsigned bucket = dm_hash_block(b, DM_HASH_MASK);  in is_shadow() local
    110  hlist_for_each_entry(si, tm->buckets + bucket, hlist)  in is_shadow()
    126  unsigned bucket;  in insert_shadow() local
    132  bucket = dm_hash_block(b, DM_HASH_MASK);  in insert_shadow()
    134  hlist_add_head(&si->hlist, tm->buckets + bucket);  in insert_shadow()
    143  struct hlist_head *bucket;  in wipe_shadow_table() local
    148  bucket = tm->buckets + i;  in wipe_shadow_table()
    149  hlist_for_each_entry_safe(si, tmp, bucket, hlist)  in wipe_shadow_table()
    152  INIT_HLIST_HEAD(bucket);  in wipe_shadow_table()

/drivers/net/ethernet/freescale/fman/

D  fman_dtsec.c
    532  static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,  in set_bucket() argument
    535  int reg_idx = (bucket >> 5) & 0xf;  in set_bucket()
    536  int bit_idx = bucket & 0x1f;  in set_bucket()
   1059  s32 bucket;  in dtsec_add_hash_mac_address() local
   1090  bucket = (s32)((crc >> 23) & 0x1ff);  in dtsec_add_hash_mac_address()
   1092  bucket = (s32)((crc >> 24) & 0xff);  in dtsec_add_hash_mac_address()
   1097  bucket += 0x100;  in dtsec_add_hash_mac_address()
   1100  set_bucket(dtsec->regs, bucket, true);  in dtsec_add_hash_mac_address()
   1112  &dtsec->multicast_addr_hash->lsts[bucket]);  in dtsec_add_hash_mac_address()
   1115  &dtsec->unicast_addr_hash->lsts[bucket]);  in dtsec_add_hash_mac_address()
    [all …]
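
set_bucket() above turns a flat bucket number into a register/bit pair: with 32 bits per register, bits 8:5 select one of sixteen registers and bits 4:0 select the bit within it. A hedged sketch with the memory-mapped register file replaced by a plain array; the bit order within a register is an assumption (the hardware may count bits from the MSB), and crc_to_bucket() just restates the two extraction modes visible in the fragments:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t hash_regs[16];          /* stand-in for the 16 hash registers */

    static void set_bucket(int bucket, bool enable)
    {
        int reg_idx = (bucket >> 5) & 0xf;  /* bits 8:5 pick the register */
        int bit_idx = bucket & 0x1f;        /* bits 4:0 pick the bit */
        uint32_t mask = UINT32_C(1) << bit_idx;  /* bit order assumed */

        if (enable)
            hash_regs[reg_idx] |= mask;
        else
            hash_regs[reg_idx] &= ~mask;
    }

    /* dtsec_add_hash_mac_address() derives the bucket from the top bits of
     * a CRC of the MAC address: 9 bits (512 buckets) in the extended-hash
     * mode, 8 bits (256 buckets) otherwise, per the fragments above. */
    static int crc_to_bucket(uint32_t crc, bool extended_hash)
    {
        return extended_hash ? (int)((crc >> 23) & 0x1ff)
                             : (int)((crc >> 24) & 0xff);
    }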

/drivers/misc/vmw_vmci/

D  vmci_doorbell.c
    128  u32 bucket = VMCI_DOORBELL_HASH(idx);  in dbell_index_table_find() local
    131  hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],  in dbell_index_table_find()
    147  u32 bucket;  in dbell_index_table_add() local
    195  bucket = VMCI_DOORBELL_HASH(entry->idx);  in dbell_index_table_add()
    196  hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);  in dbell_index_table_add()
    360  u32 bucket = VMCI_DOORBELL_HASH(notify_idx);  in dbell_fire_entries() local
    365  hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {  in dbell_fire_entries()

/drivers/gpu/drm/amd/amdgpu/

D  amdgpu_bo_list.c
    222  struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];  in amdgpu_bo_list_get_list() local
    226  INIT_LIST_HEAD(&bucket[i]);  in amdgpu_bo_list_get_list()
    238  &bucket[priority]);  in amdgpu_bo_list_get_list()
    245  list_splice(&bucket[i], validated);  in amdgpu_bo_list_get_list()

/drivers/net/wireless/broadcom/brcm80211/brcmfmac/

D  pno.c
    438  memcpy(&gscan_cfg->bucket[0], buckets,  in brcmf_pno_config_sched_scans()
    548  u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)  in brcmf_pno_find_reqid_by_bucket() argument
    554  if (bucket < pi->n_reqs)  in brcmf_pno_find_reqid_by_bucket()
    555  reqid = pi->reqs[bucket]->reqid;  in brcmf_pno_find_reqid_by_bucket()

D  pno.h
     72  u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);

/drivers/crypto/nx/

D  nx-842-pseries.c
    140  int bucket = fls(time);  in ibm_nx842_incr_hist() local
    142  if (bucket)  in ibm_nx842_incr_hist()
    143  bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);  in ibm_nx842_incr_hist()
    145  atomic64_inc(&times[bucket]);  in ibm_nx842_incr_hist()
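
ibm_nx842_incr_hist() above is a power-of-two latency histogram: fls() returns the 1-based position of the highest set bit, so the highest set bit of the measured time selects the slot and the top slot absorbs overflow. A standalone sketch with a portable fls() stand-in; the slot count and the plain (non-atomic) increment are assumptions for illustration:

    #include <stdint.h>

    #define NX842_HIST_SLOTS 16             /* assumed slot count */

    /* Portable stand-in for the kernel's fls():
     * 1-based index of the highest set bit, 0 if x == 0. */
    static int fls32(uint32_t x)
    {
        int i = 0;

        while (x) {
            i++;
            x >>= 1;
        }
        return i;
    }

    static void incr_hist(uint64_t times[], uint32_t time)
    {
        int bucket = fls32(time);

        /* Highest set bit selects the slot; the last slot absorbs overflow. */
        if (bucket)
            bucket = bucket - 1 < NX842_HIST_SLOTS - 1 ?
                     bucket - 1 : NX842_HIST_SLOTS - 1;
        times[bucket]++;                    /* atomic64_inc() in the kernel */
    }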

/drivers/gpu/drm/radeon/

D  radeon_cs.c
     42  struct list_head bucket[RADEON_CS_NUM_BUCKETS];  member
     50  INIT_LIST_HEAD(&b->bucket[i]);  in radeon_cs_buckets_init()
     61  list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);  in radeon_cs_buckets_add()
     71  list_splice(&b->bucket[i], out_list);  in radeon_cs_buckets_get_list()
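
radeon_cs.c (and amdgpu_bo_list.c above) use the same single-pass bucket sort: append each item to the list for its clamped priority, then splice the per-priority lists together in order. A sketch of that flow with plain singly linked lists standing in for struct list_head; the names and bucket count are illustrative:

    #include <string.h>

    #define NUM_BUCKETS  4
    #define MAX_PRIORITY (NUM_BUCKETS - 1)

    struct item {
        int priority;                       /* assumed non-negative */
        struct item *next;
    };

    struct buckets {
        struct item *head[NUM_BUCKETS];
        struct item *tail[NUM_BUCKETS];
    };

    static void buckets_init(struct buckets *b)
    {
        memset(b, 0, sizeof(*b));           /* cf. INIT_LIST_HEAD() loop */
    }

    static void buckets_add(struct buckets *b, struct item *it)
    {
        /* Clamp, as min(priority, RADEON_CS_MAX_PRIORITY) does above. */
        int p = it->priority < MAX_PRIORITY ? it->priority : MAX_PRIORITY;

        it->next = NULL;
        if (b->tail[p])
            b->tail[p]->next = it;          /* cf. list_add_tail() */
        else
            b->head[p] = it;
        b->tail[p] = it;
    }

    /* Concatenate the bucket lists in order (cf. list_splice()). */
    static struct item *buckets_get_list(struct buckets *b)
    {
        struct item *out = NULL, **link = &out;
        int i;

        for (i = 0; i < NUM_BUCKETS; i++) {
            if (b->head[i]) {
                *link = b->head[i];
                link = &b->tail[i]->next;
            }
        }
        return out;
    }

Because each bucket preserves insertion order and the buckets are drained in index order, this is a stable bucket sort done in a single pass over the items plus one pass over the buckets.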

/drivers/atm/

D  horizon.c
   2281  unsigned int bucket;  in hrz_open()
   2321  bucket = mbs*(pcr-scr)/pcr;  in hrz_open()
   2322  if (bucket*pcr != mbs*(pcr-scr))  in hrz_open()
   2323  bucket += 1;  in hrz_open()
   2324  if (bucket > BUCKET_MAX_SIZE) {  in hrz_open()
   2326  bucket, BUCKET_MAX_SIZE);  in hrz_open()
   2327  bucket = BUCKET_MAX_SIZE;  in hrz_open()
   2330  vcc.tx_bucket_bits = bucket;  in hrz_open()
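
The hrz_open() arithmetic sizes a token bucket for ATM traffic shaping: the maximum burst size mbs is scaled by the fraction of the peak cell rate that exceeds the sustained rate, (pcr - scr)/pcr, rounded up by checking for a remainder, then clamped to the hardware maximum. As a standalone helper; BUCKET_MAX_SIZE is a stand-in value, and like the original this assumes pcr >= scr, pcr > 0, and products that fit in unsigned int:

    #define BUCKET_MAX_SIZE 1024            /* illustrative stand-in */

    static unsigned int tx_bucket_size(unsigned int pcr, unsigned int scr,
                                       unsigned int mbs)
    {
        unsigned int bucket = mbs * (pcr - scr) / pcr;

        if (bucket * pcr != mbs * (pcr - scr))  /* round up on any remainder */
            bucket += 1;
        if (bucket > BUCKET_MAX_SIZE)           /* clamp to the hardware limit */
            bucket = BUCKET_MAX_SIZE;
        return bucket;
    }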

/drivers/net/ethernet/hisilicon/hns3/hns3pf/

D  hclge_tm.c
    267  enum hclge_shap_bucket bucket, u8 pg_id,  in hclge_tm_pg_shapping_cfg() argument
    274  opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :  in hclge_tm_pg_shapping_cfg()
    292  enum hclge_shap_bucket bucket, u8 pri_id,  in hclge_tm_pri_shapping_cfg() argument
    300  opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :  in hclge_tm_pri_shapping_cfg()

/drivers/net/ethernet/mellanox/mlx4/

D  en_netdev.c
    695  struct hlist_head *bucket;  in mlx4_en_replace_mac() local
    701  bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];  in mlx4_en_replace_mac()
    702  hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {  in mlx4_en_replace_mac()
   1132  struct hlist_head *bucket;  in mlx4_en_do_uc_filter() local
   1143  bucket = &priv->mac_hash[i];  in mlx4_en_do_uc_filter()
   1144  hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {  in mlx4_en_do_uc_filter()
   1187  bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];  in mlx4_en_do_uc_filter()
   1188  hlist_for_each_entry(entry, bucket, hlist) {  in mlx4_en_do_uc_filter()
   1228  bucket = &priv->mac_hash[mac_hash];  in mlx4_en_do_uc_filter()
   1229  hlist_add_head_rcu(&entry->hlist, bucket);  in mlx4_en_do_uc_filter()
    [all …]

/drivers/target/iscsi/cxgbit/

D  cxgbit_cm.c
     88  int bucket = cxgbit_np_hashfn(cnp);  in cxgbit_np_hash_add() local
     93  p->next = cdev->np_hash_tab[bucket];  in cxgbit_np_hash_add()
     94  cdev->np_hash_tab[bucket] = p;  in cxgbit_np_hash_add()
    104  int stid = -1, bucket = cxgbit_np_hashfn(cnp);  in cxgbit_np_hash_find()
    108  for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {  in cxgbit_np_hash_find()
    121  int stid = -1, bucket = cxgbit_np_hashfn(cnp);  in cxgbit_np_hash_del()
    122  struct np_info *p, **prev = &cdev->np_hash_tab[bucket];  in cxgbit_np_hash_del()

/drivers/md/

D  dm-cache-policy-smq.c
    593  static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)  in h_head() argument
    595  return to_entry(ht->es, ht->buckets[bucket]);  in h_head()
    603  static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)  in __h_insert() argument
    605  e->hash_next = ht->buckets[bucket];  in __h_insert()
    606  ht->buckets[bucket] = to_index(ht->es, e);  in __h_insert()
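
The smq hash table above chains entries by index rather than pointer: buckets[] and hash_next hold offsets into a preallocated entry space, converted with to_entry()/to_index(), which keeps the table compact and independent of where the array sits in memory. A compact sketch of that representation; the sentinel value, sizes, and field widths are assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define NR_ENTRIES 1024                 /* assumed */
    #define NR_BUCKETS 256                  /* assumed */
    #define NULL_INDEX UINT16_MAX           /* assumed sentinel: end of chain */

    struct entry {
        uint64_t key;
        uint16_t hash_next;                 /* index of the next entry in the chain */
    };

    struct smq_hash_table {
        struct entry es[NR_ENTRIES];        /* preallocated "entry space" */
        uint16_t buckets[NR_BUCKETS];       /* each holds an entry index */
    };

    static uint16_t to_index(struct smq_hash_table *ht, struct entry *e)
    {
        return (uint16_t)(e - ht->es);
    }

    static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
    {
        uint16_t i = ht->buckets[bucket];

        return i == NULL_INDEX ? NULL : &ht->es[i];
    }

    static void __h_insert(struct smq_hash_table *ht, unsigned bucket,
                           struct entry *e)
    {
        e->hash_next = ht->buckets[bucket]; /* old head chains behind the new one */
        ht->buckets[bucket] = to_index(ht, e);
    }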

D  dm-region-hash.c
    272  struct list_head *bucket = rh->buckets + rh_hash(rh, region);  in __rh_lookup() local
    274  list_for_each_entry(reg, bucket, hash_list)  in __rh_lookup()

/drivers/media/v4l2-core/

D  v4l2-ctrls.c
   1832  int bucket;  in find_ref() local
   1839  bucket = id % hdl->nr_of_buckets;  in find_ref()
   1846  ref = hdl->buckets ? hdl->buckets[bucket] : NULL;  in find_ref()
   1886  int bucket = id % hdl->nr_of_buckets; /* which bucket to use */  in handler_new_ref() local
   1941  new_ref->next = hdl->buckets[bucket];  in handler_new_ref()
   1942  hdl->buckets[bucket] = new_ref;  in handler_new_ref()