
Searched refs:nr_entries (Results 1 – 25 of 33) sorted by relevance

/drivers/lightnvm/
pblk-rb.c
48 static unsigned int pblk_rb_calculate_size(unsigned int nr_entries, in pblk_rb_calculate_size() argument
52 unsigned int max_sz = max(thr_sz, nr_entries); in pblk_rb_calculate_size()
81 unsigned int nr_entries; in pblk_rb_init() local
83 nr_entries = pblk_rb_calculate_size(size, threshold); in pblk_rb_init()
84 entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry))); in pblk_rb_init()
88 power_size = get_count_order(nr_entries); in pblk_rb_init()
94 rb->nr_entries = (1 << power_size); in pblk_rb_init()
165 pblk_rl_init(&pblk->rl, rb->nr_entries, threshold); in pblk_rb_init()
197 return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries); in pblk_rb_space()
201 unsigned int nr_entries) in pblk_rb_ptr_wrap() argument
[all …]
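
The pblk-rb.c matches above size the write buffer: pblk_rb_init() rounds the requested entry count up to a power of two via get_count_order(), so ring wrap-around becomes a mask instead of a modulo. A minimal userspace sketch of that rounding, with count_order() as a hypothetical stand-in for the kernel helper:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's get_count_order(): the
     * smallest order such that (1 << order) >= n. Hypothetical helper,
     * not the kernel implementation. */
    static unsigned int count_order(unsigned int n)
    {
            unsigned int order = 0;

            while ((1u << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int nr_entries = 1000;
            unsigned int order = count_order(nr_entries);

            /* As in pblk_rb_init(): the ring holds 1 << order slots,
             * so wrapping a position is just "& (size - 1)". */
            printf("requested %u, ring size %u\n", nr_entries, 1u << order);
            return 0;
    }
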
pblk-cache.c
28 int nr_entries = pblk_get_secs(bio); in pblk_write_to_cache() local
38 ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos); in pblk_write_to_cache()
59 for (i = 0; i < nr_entries; i++) { in pblk_write_to_cache()
70 atomic64_add(nr_entries, &pblk->user_wa); in pblk_write_to_cache()
73 atomic_long_add(nr_entries, &pblk->inflight_writes); in pblk_write_to_cache()
74 atomic_long_add(nr_entries, &pblk->req_writes); in pblk_write_to_cache()
77 pblk_rl_inserted(&pblk->rl, nr_entries); in pblk_write_to_cache()
pblk-rl.c
36 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries) in pblk_rl_user_may_insert() argument
41 if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0)) in pblk_rl_user_may_insert()
50 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries) in pblk_rl_inserted() argument
55 atomic_sub(nr_entries, &rl->rb_space); in pblk_rl_inserted()
58 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries) in pblk_rl_gc_may_insert() argument
68 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries) in pblk_rl_user_in() argument
70 atomic_add(nr_entries, &rl->rb_user_cnt); in pblk_rl_user_in()
87 void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries) in pblk_rl_gc_in() argument
89 atomic_add(nr_entries, &rl->rb_gc_cnt); in pblk_rl_gc_in()
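
The pblk-rl.c matches show the write-buffer rate limiter: a signed budget in rb_space (negative means unlimited) that admission checks compare against and inserts decrement, plus per-source counters. A minimal sketch of that bookkeeping, assuming C11 atomics in place of the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Sketch of the pblk rate-limiter state; field names mirror the
     * snippets above but this is not the driver's struct pblk_rl. */
    struct rl {
            atomic_int rb_space;    /* remaining budget; < 0 means unlimited */
            atomic_int rb_user_cnt; /* user entries accounted so far */
    };

    static int rl_user_may_insert(struct rl *rl, int nr_entries)
    {
            int rb_space = atomic_load(&rl->rb_space);

            /* Same test as pblk_rl_user_may_insert(): reject only when a
             * budget is active and this request would overrun it. */
            return !(rb_space >= 0 && rb_space - nr_entries < 0);
    }

    static void rl_inserted(struct rl *rl, int nr_entries)
    {
            if (atomic_load(&rl->rb_space) >= 0)
                    atomic_fetch_sub(&rl->rb_space, nr_entries);
    }

    static void rl_user_in(struct rl *rl, int nr_entries)
    {
            atomic_fetch_add(&rl->rb_user_cnt, nr_entries);
    }

    int main(void)
    {
            struct rl rl = { .rb_space = 8, .rb_user_cnt = 0 };

            if (rl_user_may_insert(&rl, 4)) {
                    rl_user_in(&rl, 4);
                    rl_inserted(&rl, 4);
            }
            printf("space left: %d, user count: %d\n",
                   atomic_load(&rl.rb_space), atomic_load(&rl.rb_user_cnt));
            printf("may insert 8 more: %d\n", rl_user_may_insert(&rl, 8));
            return 0;
    }
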
pblk.h
185 unsigned int nr_entries; /* Number of entries in write buffer - member
419 unsigned int nr_entries; /* Number of emeta entries */ member
726 unsigned int nr_entries, unsigned int *pos);
727 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
739 unsigned int pos, unsigned int nr_entries,
746 unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
748 unsigned int nr_entries);
908 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
909 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
910 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
[all …]
pblk-write.c
148 unsigned int nr_entries) in pblk_prepare_resubmit() argument
159 for (i = 0; i < nr_entries; i++) { in pblk_prepare_resubmit()
293 if (sync == emeta->nr_entries) in pblk_end_io_write_meta()
/drivers/dma/dw-edma/
dw-edma-v0-debugfs.c
98 int nr_entries, struct dentry *dir) in dw_edma_debugfs_create_x32() argument
102 for (i = 0; i < nr_entries; i++) { in dw_edma_debugfs_create_x32()
112 int nr_entries; in dw_edma_debugfs_regs_ch() local
125 nr_entries = ARRAY_SIZE(debugfs_regs); in dw_edma_debugfs_regs_ch()
126 dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); in dw_edma_debugfs_regs_ch()
167 int nr_entries, i; in dw_edma_debugfs_regs_wr() local
174 nr_entries = ARRAY_SIZE(debugfs_regs); in dw_edma_debugfs_regs_wr()
175 dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); in dw_edma_debugfs_regs_wr()
178 nr_entries = ARRAY_SIZE(debugfs_unroll_regs); in dw_edma_debugfs_regs_wr()
179 dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, in dw_edma_debugfs_regs_wr()
[all …]
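
The dw-edma debugfs matches follow a common driver pattern: nr_entries comes from ARRAY_SIZE() of a static register table, and one helper walks the table. A small sketch of that shape; the table contents here are illustrative, not the driver's real register map:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct debugfs_entry {
            const char *name;
            unsigned int offset;
    };

    /* Assumed example table; dw_edma_debugfs_regs_ch() uses its own. */
    static const struct debugfs_entry debugfs_regs[] = {
            { "ctrl",   0x00 },
            { "status", 0x04 },
            { "size",   0x08 },
    };

    int main(void)
    {
            int nr_entries = ARRAY_SIZE(debugfs_regs);
            int i;

            for (i = 0; i < nr_entries; i++)
                    printf("%s @ 0x%02x\n", debugfs_regs[i].name,
                           debugfs_regs[i].offset);
            return 0;
    }
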
/drivers/md/persistent-data/
dm-btree-remove.c
58 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); in node_shift() local
63 BUG_ON(shift > nr_entries); in node_shift()
67 (nr_entries - shift) * sizeof(__le64)); in node_shift()
70 (nr_entries - shift) * value_size); in node_shift()
72 BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries)); in node_shift()
75 nr_entries * sizeof(__le64)); in node_shift()
78 nr_entries * value_size); in node_shift()
84 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); in node_copy()
113 unsigned nr_entries = le32_to_cpu(n->header.nr_entries); in delete_at() local
114 unsigned nr_to_copy = nr_entries - (index + 1); in delete_at()
[all …]
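
node_shift() and delete_at() above remove or shift btree entries with memmove(), copying nr_entries - (index + 1) trailing slots down over the deleted one. A simplified, runnable version over a plain key array (the kernel version moves keys and values and updates header.nr_entries):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned long long keys[] = { 10, 20, 30, 40, 50 };
            unsigned int nr_entries = 5, index = 1, i;
            unsigned int nr_to_copy = nr_entries - (index + 1);

            /* Delete keys[index] by sliding the tail down one slot. */
            memmove(keys + index, keys + index + 1,
                    nr_to_copy * sizeof(keys[0]));
            nr_entries--;

            for (i = 0; i < nr_entries; i++)
                    printf("%llu ", keys[i]); /* 10 30 40 50 */
            printf("\n");
            return 0;
    }
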
dm-btree.c
43 int lo = -1, hi = le32_to_cpu(n->header.nr_entries); in bsearch()
75 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); in inc_children() local
78 for (i = 0; i < nr_entries; i++) in inc_children()
81 for (i = 0; i < nr_entries; i++) in inc_children()
89 uint32_t nr_entries = le32_to_cpu(node->header.nr_entries); in insert_at() local
93 if (index > nr_entries || in insert_at()
95 nr_entries >= max_entries) { in insert_at()
103 array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le); in insert_at()
104 array_insert(value_base(node), value_size, nr_entries, index, value); in insert_at()
105 node->header.nr_entries = cpu_to_le32(nr_entries + 1); in insert_at()
[all …]
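
The bsearch() hit above starts with the exclusive bounds lo = -1 and hi = nr_entries, which keeps every probe in range and converges on the last key <= the search key. A standalone sketch of that invariant:

    #include <stdio.h>

    /* Returns the index of the greatest key <= key, or -1 if key is
     * below keys[0]; same bounds discipline as dm-btree's bsearch(). */
    static int bsearch_le(const unsigned long long *keys,
                          unsigned int nr_entries, unsigned long long key)
    {
            int lo = -1, hi = (int)nr_entries;

            while (hi - lo > 1) {
                    int mid = lo + (hi - lo) / 2;

                    if (keys[mid] <= key)
                            lo = mid;
                    else
                            hi = mid;
            }
            return lo;
    }

    int main(void)
    {
            unsigned long long keys[] = { 10, 20, 30, 40 };

            printf("%d %d %d\n",
                   bsearch_le(keys, 4, 25),  /* 1 */
                   bsearch_le(keys, 4, 40),  /* 3 */
                   bsearch_le(keys, 4, 5));  /* -1 */
            return 0;
    }
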
dm-array.c
26 __le32 nr_entries; member
113 unsigned i, nr_entries = le32_to_cpu(ab->nr_entries); in on_entries() local
115 for (i = 0; i < nr_entries; i++) in on_entries()
164 (*ab)->nr_entries = cpu_to_le32(0); in alloc_ablock()
179 uint32_t nr_entries; in fill_ablock() local
183 BUG_ON(new_nr < le32_to_cpu(ab->nr_entries)); in fill_ablock()
185 nr_entries = le32_to_cpu(ab->nr_entries); in fill_ablock()
186 for (i = nr_entries; i < new_nr; i++) { in fill_ablock()
191 ab->nr_entries = cpu_to_le32(new_nr); in fill_ablock()
203 uint32_t nr_entries; in trim_ablock() local
[all …]
dm-bitset.c
230 dm_block_t root, uint32_t nr_entries, in dm_bitset_cursor_begin() argument
236 if (!nr_entries) in dm_bitset_cursor_begin()
240 c->entries_remaining = nr_entries; in dm_bitset_cursor_begin()
dm-block-manager.c
39 unsigned int nr_entries; member
86 t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2); in __add_holder()
108 lock->traces[i].nr_entries, 4); in __check_holder()
dm-bitset.h
195 dm_block_t root, uint32_t nr_entries,
dm-btree-internal.h
33 __le32 nr_entries; member
dm-btree-spine.c
70 if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { in node_check()
/drivers/net/ethernet/mellanox/mlxsw/
spectrum1_kvdl.c
122 unsigned int entry_index, nr_entries; in mlxsw_sp1_kvdl_part_alloc() local
124 nr_entries = (info->end_index - info->start_index + 1) / in mlxsw_sp1_kvdl_part_alloc()
126 entry_index = find_first_zero_bit(part->usage, nr_entries); in mlxsw_sp1_kvdl_part_alloc()
127 if (entry_index == nr_entries) in mlxsw_sp1_kvdl_part_alloc()
215 unsigned int nr_entries; in mlxsw_sp1_kvdl_part_init() local
227 nr_entries = div_u64(resource_size, info->alloc_size); in mlxsw_sp1_kvdl_part_init()
228 usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); in mlxsw_sp1_kvdl_part_init()
281 unsigned int nr_entries; in mlxsw_sp1_kvdl_part_occ() local
285 nr_entries = (info->end_index - in mlxsw_sp1_kvdl_part_occ()
288 while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) in mlxsw_sp1_kvdl_part_occ()
[all …]
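
The mlxsw kvdl matches allocate from a usage bitmap: find the first zero bit, and treat an index equal to nr_entries as "full". A userspace sketch with a naive scan standing in for the kernel's find_first_zero_bit():

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Naive stand-in for find_first_zero_bit(); returns nr_entries
     * when every bit is set, matching the kernel convention. */
    static unsigned int find_first_zero(const unsigned long *map,
                                        unsigned int nr_entries)
    {
            unsigned int i;

            for (i = 0; i < nr_entries; i++)
                    if (!(map[i / BITS_PER_LONG] &
                          (1ul << (i % BITS_PER_LONG))))
                            return i;
            return nr_entries;
    }

    int main(void)
    {
            unsigned long usage[1] = { 0x7 }; /* entries 0..2 taken */
            unsigned int nr_entries = 16;
            unsigned int idx = find_first_zero(usage, nr_entries);

            if (idx == nr_entries)
                    printf("no free entry\n");
            else
                    printf("allocated entry %u\n", idx); /* 3 */
            return 0;
    }
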
/drivers/md/
dm-cache-policy-internal.h
115 static inline size_t bitset_size_in_bytes(unsigned nr_entries) in bitset_size_in_bytes() argument
117 return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); in bitset_size_in_bytes()
120 static inline unsigned long *alloc_bitset(unsigned nr_entries) in alloc_bitset() argument
122 size_t s = bitset_size_in_bytes(nr_entries); in alloc_bitset()
126 static inline void clear_bitset(void *bitset, unsigned nr_entries) in clear_bitset() argument
128 size_t s = bitset_size_in_bytes(nr_entries); in clear_bitset()
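
bitset_size_in_bytes() above is just round-up division: one unsigned long per BITS_PER_LONG entries. A self-contained version of the same math, with div_up() standing in for dm_div_up():

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Round-up integer division, like dm_div_up(). */
    static size_t div_up(size_t n, size_t d)
    {
            return (n + d - 1) / d;
    }

    static size_t bitset_size_in_bytes(unsigned int nr_entries)
    {
            return sizeof(unsigned long) * div_up(nr_entries, BITS_PER_LONG);
    }

    int main(void)
    {
            unsigned int nr_entries = 100;
            size_t s = bitset_size_in_bytes(nr_entries);
            unsigned long *bitset = calloc(1, s); /* alloc + clear */

            printf("%u entries -> %zu bytes\n", nr_entries, s);
            free(bitset);
            return 0;
    }
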
dm-cache-policy-smq.c
65 static int space_init(struct entry_space *es, unsigned nr_entries) in space_init() argument
67 if (!nr_entries) { in space_init()
72 es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry))); in space_init()
76 es->end = es->begin + nr_entries; in space_init()
583 static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries) in h_init() argument
588 nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u)); in h_init()
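
h_init() above sizes the hash table at roughly one bucket per four entries, floored at 16 and rounded up to a power of two. A sketch of that heuristic, with roundup_pow2() as a stand-in for the kernel's roundup_pow_of_two():

    #include <stdio.h>

    /* Stand-in for roundup_pow_of_two(): smallest power of two >= n. */
    static unsigned int roundup_pow2(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int nr_entries = 1000;
            unsigned int n = nr_entries / 4;
            unsigned int nr_buckets = roundup_pow2(n > 16 ? n : 16);

            printf("%u entries -> %u buckets\n", nr_entries, nr_buckets); /* 256 */
            return 0;
    }
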
/drivers/pci/pcie/
portdrv_core.c
103 int nr_entries, nvec, pcie_irq; in pcie_port_enable_irq_vec() local
107 nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES, in pcie_port_enable_irq_vec()
109 if (nr_entries < 0) in pcie_port_enable_irq_vec()
110 return nr_entries; in pcie_port_enable_irq_vec()
114 if (nvec > nr_entries) { in pcie_port_enable_irq_vec()
130 if (nvec != nr_entries) { in pcie_port_enable_irq_vec()
133 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, in pcie_port_enable_irq_vec()
135 if (nr_entries < 0) in pcie_port_enable_irq_vec()
136 return nr_entries; in pcie_port_enable_irq_vec()
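
pcie_port_enable_irq_vec() above over-allocates MSI/MSI-X vectors first, then re-allocates exactly the count the port services need when they differ. A sketch of that retry shape; fake_alloc_irq_vectors() is a hypothetical stand-in for pci_alloc_irq_vectors(), and the grant limit of 4 is invented:

    #include <stdio.h>

    /* Pretend the device can grant at most 4 vectors; returns the
     * granted count, or -1 when even min_vecs cannot be met. */
    static int fake_alloc_irq_vectors(int min_vecs, int max_vecs)
    {
            int granted = max_vecs < 4 ? max_vecs : 4;

            return granted >= min_vecs ? granted : -1;
    }

    int main(void)
    {
            int nvec = 2; /* vectors the services actually need (assumed) */
            int nr_entries = fake_alloc_irq_vectors(1, 32);

            if (nr_entries < 0)
                    return 1;
            /* Second pass: ask for exactly nvec, as the driver does. */
            if (nvec != nr_entries)
                    nr_entries = fake_alloc_irq_vectors(nvec, nvec);
            printf("using %d vector(s)\n", nr_entries); /* 2 */
            return 0;
    }
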
/drivers/firmware/efi/libstub/
x86-stub.c
434 struct setup_data *e820ext, u32 nr_entries) in add_e820ext() argument
439 e820ext->len = nr_entries * sizeof(struct boot_e820_entry); in add_e820ext()
459 u32 nr_entries; in setup_e820() local
463 nr_entries = 0; in setup_e820()
525 if (nr_entries == ARRAY_SIZE(params->e820_table)) { in setup_e820()
540 nr_entries++; in setup_e820()
543 if (nr_entries > ARRAY_SIZE(params->e820_table)) { in setup_e820()
544 u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table); in setup_e820()
547 nr_entries -= nr_e820ext; in setup_e820()
550 params->e820_entries = (u8)nr_entries; in setup_e820()
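
setup_e820() above counts entries into the fixed zero-page table and spills any excess into a setup_data extension, per the nr_e820ext arithmetic at lines 543-549. A sketch of just that split; the table size of 128 is an assumption here, not read from the source:

    #include <stdio.h>

    #define E820_TABLE_SIZE 128 /* assumed fixed-table capacity */

    int main(void)
    {
            unsigned int nr_entries = 150;
            unsigned int nr_e820ext = 0;

            /* Overflow entries go to the extension; the table keeps
             * the rest, exactly as in setup_e820(). */
            if (nr_entries > E820_TABLE_SIZE) {
                    nr_e820ext = nr_entries - E820_TABLE_SIZE;
                    nr_entries -= nr_e820ext;
            }
            printf("table: %u, extension: %u\n", nr_entries, nr_e820ext);
            return 0;
    }
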
/drivers/pci/
msi.c
669 static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) in msix_map_region() argument
686 return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); in msix_map_region()
995 int nr_entries; in __pci_enable_msix() local
1001 nr_entries = pci_msix_vec_count(dev); in __pci_enable_msix()
1002 if (nr_entries < 0) in __pci_enable_msix()
1003 return nr_entries; in __pci_enable_msix()
1004 if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL)) in __pci_enable_msix()
1005 return nr_entries; in __pci_enable_msix()
1010 if (entries[i].entry >= nr_entries) in __pci_enable_msix()
/drivers/gpu/drm/i915/
intel_runtime_pm.c
72 unsigned int nr_entries; in __print_depot_stack() local
74 nr_entries = stack_depot_fetch(stack, &entries); in __print_depot_stack()
75 stack_trace_snprint(buf, sz, entries, nr_entries, indent); in __print_depot_stack()
i915_vma.c
63 unsigned int nr_entries; in vma_print_allocator() local
72 nr_entries = stack_depot_fetch(vma->node.stack, &entries); in vma_print_allocator()
73 stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); in vma_print_allocator()
/drivers/pci/hotplug/
pnv_php.c
723 int nr_entries, ret; in pnv_php_enable_msix() local
727 nr_entries = pci_msix_vec_count(pdev); in pnv_php_enable_msix()
728 if (nr_entries < 0) in pnv_php_enable_msix()
729 return nr_entries; in pnv_php_enable_msix()
734 if (entry.entry >= nr_entries) in pnv_php_enable_msix()
/drivers/hwtracing/coresight/
coresight-tmc-etr.c
491 int i, type, nr_entries; in tmc_etr_sg_table_populate() local
502 nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages); in tmc_etr_sg_table_populate()
511 for (i = 0; i < nr_entries - 1; i++) { in tmc_etr_sg_table_populate()
567 int nr_entries, nr_tpages; in tmc_init_etr_sg_table() local
575 nr_entries = tmc_etr_sg_table_entries(nr_dpages); in tmc_init_etr_sg_table()
576 nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE); in tmc_init_etr_sg_table()
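
tmc_init_etr_sg_table() above derives the number of table pages from nr_entries with DIV_ROUND_UP(). The same computation, standalone; the pointers-per-page value is assumed rather than the driver's actual ETR_SG_PTRS_PER_SYSPAGE:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ptrs_per_page = 512; /* assumed constant */
            unsigned int nr_entries = 1300;
            unsigned int nr_tpages = DIV_ROUND_UP(nr_entries, ptrs_per_page);

            printf("%u entries -> %u table pages\n",
                   nr_entries, nr_tpages); /* 3 */
            return 0;
    }
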
/drivers/gpu/drm/
drm_mm.c
122 unsigned int nr_entries; in show_leaks() local
136 nr_entries = stack_depot_fetch(node->stack, &entries); in show_leaks()
137 stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); in show_leaks()
