
Searched refs:entries (Results 1 – 25 of 128) sorted by relevance

/arch/powerpc/mm/book3s64/
iommu_api.c
34 u64 entries; /* number of entries in hpas/hpages[] */ member
57 unsigned long entries, unsigned long dev_hpa, in mm_iommu_do_alloc() argument
66 ret = account_locked_vm(mm, entries, true); in mm_iommu_do_alloc()
70 locked_entries = entries; in mm_iommu_do_alloc()
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_do_alloc()
102 chunk = min(chunk, entries); in mm_iommu_do_alloc()
103 for (entry = 0; entry < entries; entry += chunk) { in mm_iommu_do_alloc()
104 unsigned long n = min(entries - entry, chunk); in mm_iommu_do_alloc()
[all …]
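A minimal sketch (not code from the tree) of the allocation pattern the mm_iommu_do_alloc() hits above show: charge the pinned pages against the mm's locked-VM limit with account_locked_vm(), then size the entries array with the overflow-checked array_size() helper before vzalloc(). The helper name demo_alloc_entries() is hypothetical.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>

static unsigned long *demo_alloc_entries(struct mm_struct *mm,
					 unsigned long entries)
{
	unsigned long *hpas;
	int ret;

	/* Charge the pinned pages against RLIMIT_MEMLOCK up front. */
	ret = account_locked_vm(mm, entries, true);
	if (ret)
		return ERR_PTR(ret);

	/* array_size() saturates on overflow, so vzalloc() simply fails. */
	hpas = vzalloc(array_size(entries, sizeof(*hpas)));
	if (!hpas) {
		account_locked_vm(mm, entries, false);	/* undo the charge */
		return ERR_PTR(-ENOMEM);
	}

	return hpas;
}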
/arch/x86/kernel/cpu/
intel.c
829 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
830 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
831 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
832 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
835 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
836 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
837 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
838 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
839 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) in intel_tlb_lookup()
840 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; in intel_tlb_lookup()
[all …]
/arch/x86/xen/
multicalls.c
41 struct multicall_entry entries[MC_BATCH]; member
73 memcpy(b->debug, b->entries, in xen_mc_flush()
86 mc = &b->entries[0]; in xen_mc_flush()
95 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) in xen_mc_flush()
98 if (b->entries[i].result < 0) in xen_mc_flush()
106 if (b->entries[i].result < 0) { in xen_mc_flush()
112 b->entries[i].result, in xen_mc_flush()
117 b->entries[i].op, in xen_mc_flush()
118 b->entries[i].args[0], in xen_mc_flush()
119 b->entries[i].result); in xen_mc_flush()
[all …]
setup.c
202 const struct e820_entry *entry = xen_e820_table.entries; in xen_find_pfn_range()
459 const struct e820_entry *entry = xen_e820_table.entries; in xen_foreach_remap_area()
605 struct e820_entry *entry = xen_e820_table.entries; in xen_ignore_unusable()
624 entry = xen_e820_table.entries; in xen_is_e820_reserved()
649 struct e820_entry *entry = xen_e820_table.entries; in xen_find_free_area()
745 memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); in xen_memory_setup()
746 set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); in xen_memory_setup()
759 xen_e820_table.entries[0].addr = 0ULL; in xen_memory_setup()
760 xen_e820_table.entries[0].size = mem_end; in xen_memory_setup()
762 xen_e820_table.entries[0].size += 8ULL << 20; in xen_memory_setup()
[all …]
/arch/arm64/kvm/vgic/
vgic-irqfd.c
137 struct kvm_irq_routing_entry *entries; in kvm_vgic_setup_default_irq_routing() local
142 entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL_ACCOUNT); in kvm_vgic_setup_default_irq_routing()
143 if (!entries) in kvm_vgic_setup_default_irq_routing()
147 entries[i].gsi = i; in kvm_vgic_setup_default_irq_routing()
148 entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; in kvm_vgic_setup_default_irq_routing()
149 entries[i].u.irqchip.irqchip = 0; in kvm_vgic_setup_default_irq_routing()
150 entries[i].u.irqchip.pin = i; in kvm_vgic_setup_default_irq_routing()
152 ret = kvm_set_irq_routing(kvm, entries, nr, 0); in kvm_vgic_setup_default_irq_routing()
153 kfree(entries); in kvm_vgic_setup_default_irq_routing()
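The kvm_vgic_setup_default_irq_routing() hits above follow a common allocate/fill/hand-off shape: build a temporary array of routing entries, pass it to kvm_set_irq_routing(), and free it because the routing layer keeps its own copy. A hedged sketch of that shape; demo_default_routing() and the flat gsi == pin mapping are illustrative, not the vgic code itself.

#include <linux/kvm_host.h>
#include <linux/slab.h>

static int demo_default_routing(struct kvm *kvm, unsigned int nr)
{
	struct kvm_irq_routing_entry *entries;
	unsigned int i;
	int ret;

	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL_ACCOUNT);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		entries[i].gsi = i;			/* one GSI per pin */
		entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
		entries[i].u.irqchip.irqchip = 0;
		entries[i].u.irqchip.pin = i;
	}

	ret = kvm_set_irq_routing(kvm, entries, nr, 0);
	kfree(entries);	/* the routing layer copies the table */
	return ret;
}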
/arch/x86/kernel/
e820.c
83 struct e820_entry *entry = &table->entries[i]; in _e820__mapped_any()
118 struct e820_entry *entry = &e820_table->entries[i]; in __e820__mapped_all()
170 if (x >= ARRAY_SIZE(table->entries)) { in __e820__range_add()
176 table->entries[x].addr = start; in __e820__range_add()
177 table->entries[x].size = size; in __e820__range_add()
178 table->entries[x].type = type; in __e820__range_add()
210 e820_table->entries[i].addr, in e820__print_table()
211 e820_table->entries[i].addr + e820_table->entries[i].size - 1); in e820__print_table()
213 e820_print_type(e820_table->entries[i].type); in e820__print_table()
324 struct e820_entry *entries = table->entries; in e820__update_table() local
[all …]
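The __e820__range_add() hits illustrate a bounded append into a fixed-size entries array. A rough sketch under the assumption of the standard struct e820_table layout (a nr_entries count plus an entries[] array); demo_range_add() is a hypothetical stand-in, not the in-tree function.

#include <linux/kernel.h>
#include <linux/printk.h>
#include <asm/e820/api.h>

static void demo_range_add(struct e820_table *table, u64 start, u64 size,
			   enum e820_type type)
{
	int x = table->nr_entries;

	/* Drop the range rather than overrun the fixed entries[] array. */
	if (x >= ARRAY_SIZE(table->entries)) {
		pr_err("too many e820 entries, ignoring [mem %#018llx-%#018llx]\n",
		       start, start + size - 1);
		return;
	}

	table->entries[x].addr = start;
	table->entries[x].size = size;
	table->entries[x].type = type;
	table->nr_entries++;
}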
ldt.c
83 set_ldt(ldt->entries, ldt->nr_entries); in load_mm_ldt()
171 new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); in alloc_ldt_struct()
173 new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); in alloc_ldt_struct()
175 if (!new_ldt->entries) { in alloc_ldt_struct()
307 is_vmalloc = is_vmalloc_addr(ldt->entries); in map_ldt_struct()
313 const void *src = (char *)ldt->entries + offset; in map_ldt_struct()
416 paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); in finalize_ldt_struct()
437 paravirt_free_ldt(ldt->entries, ldt->nr_entries); in free_ldt_struct()
439 vfree_atomic(ldt->entries); in free_ldt_struct()
441 free_page((unsigned long)ldt->entries); in free_ldt_struct()
[all …]
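The alloc_ldt_struct()/free_ldt_struct() hits show a size-dependent pairing: a multi-page descriptor table comes from __vmalloc(), a small one from a single zeroed page, and the free side picks the matching routine via is_vmalloc_addr(). A minimal sketch of that pairing, assuming a kernel where __vmalloc() takes only size and gfp flags (as the hit on line 171 suggests); the demo_* names are hypothetical.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *demo_alloc_table(size_t alloc_size)
{
	if (alloc_size > PAGE_SIZE)
		return __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	return (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
}

static void demo_free_table(void *table)
{
	if (is_vmalloc_addr(table))
		vfree_atomic(table);	/* usable from contexts that cannot sleep */
	else
		free_page((unsigned long)table);
}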
/arch/s390/include/asm/
mem_detect.h
33 struct mem_detect_block entries[MEM_INLINED_ENTRIES]; member
50 *start = (unsigned long)mem_detect.entries[n].start; in __get_mem_detect_block()
51 *end = (unsigned long)mem_detect.entries[n].end; in __get_mem_detect_block()
/arch/parisc/kernel/
pdt.c
150 unsigned long entries; in pdc_pdt_init() local
174 entries = pdt_status.pdt_entries; in pdc_pdt_init()
175 if (WARN_ON(entries > MAX_PDT_ENTRIES)) in pdc_pdt_init()
176 entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES; in pdc_pdt_init()
187 if (entries == 0) { in pdc_pdt_init()
197 entries); in pdc_pdt_init()
inventory.c
310 int entries; in pat_memconfig() local
332 entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry); in pat_memconfig()
334 if (entries > PAT_MAX_RANGES) { in pat_memconfig()
347 for (i = 0; i < entries; i++,mtbl_ptr++) { in pat_memconfig()
404 int entries; in sprockets_memconfig() local
427 entries = (int)r_addr.entries_returned; in sprockets_memconfig()
432 for (i = 0; i < entries; i++,mtbl_ptr++) { in sprockets_memconfig()
/arch/mips/mti-malta/
malta-dtshim.c
74 unsigned entries; in gen_fdt_mem_array() local
76 entries = 1; in gen_fdt_mem_array()
108 entries++; in gen_fdt_mem_array()
121 entries++; in gen_fdt_mem_array()
127 BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES); in gen_fdt_mem_array()
128 return entries; in gen_fdt_mem_array()
/arch/arm/mm/
proc-arm940.S
111 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
114 bcs 2b @ entries 63 to 0
161 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
164 bcs 2b @ entries 63 to 0
183 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
186 bcs 2b @ entries 63 to 0
206 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
209 bcs 2b @ entries 63 to 0
228 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
236 bcs 2b @ entries 63 to 0
/arch/powerpc/kvm/
e500_mmu_host.c
38 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
45 return host_tlb_params[1].entries - tlbcam_index - 1; in tlb1_max_shadow_size()
275 sizeof(u64) * vcpu_e500->gtlb_params[1].entries); in clear_tlb1_bitmap()
278 sizeof(unsigned int) * host_tlb_params[1].entries); in clear_tlb1_bitmap()
287 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { in clear_tlb_privs()
760 host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; in e500_mmu_host_init()
761 host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; in e500_mmu_host_init()
768 if (host_tlb_params[0].entries == 0 || in e500_mmu_host_init()
769 host_tlb_params[1].entries == 0) { in e500_mmu_host_init()
776 host_tlb_params[1].ways = host_tlb_params[1].entries; in e500_mmu_host_init()
[all …]
e500_mmu.c
73 esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; in get_tlb_esel()
83 int size = vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_tlb_index()
153 int size = vcpu_e500->gtlb_params[1].entries; in kvmppc_recalc_tlb1map_range()
233 for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
236 for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
258 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_emul_tlbivax()
282 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { in tlbilx_all()
824 vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; in kvm_vcpu_ioctl_config_tlb()
825 vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; in kvm_vcpu_ioctl_config_tlb()
875 vcpu->arch.tlbcfg[0] |= params[0].entries; in vcpu_mmu_init()
[all …]
/arch/x86/kvm/
cpuid.c
60 struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index) in cpuid_entry2_find() argument
66 e = &entries[i]; in cpuid_entry2_find()
76 static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent) in kvm_check_cpuid() argument
84 best = cpuid_entry2_find(entries, nent, 0x80000008, 0); in kvm_check_cpuid()
257 struct kvm_cpuid_entry __user *entries) in kvm_vcpu_ioctl_set_cpuid() argument
267 e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent)); in kvm_vcpu_ioctl_set_cpuid()
302 struct kvm_cpuid_entry2 __user *entries) in kvm_vcpu_ioctl_set_cpuid2() argument
311 e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent)); in kvm_vcpu_ioctl_set_cpuid2()
325 struct kvm_cpuid_entry2 __user *entries) in kvm_vcpu_ioctl_get_cpuid2() argument
333 if (copy_to_user(entries, vcpu->arch.cpuid_entries, in kvm_vcpu_ioctl_get_cpuid2()
[all …]
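A hedged sketch of the linear lookup behind the cpuid_entry2_find() hits: walk the entries array and match on function, comparing index only when the entry flags it as significant. The helper name is hypothetical and this is a simplified reading of the hits, not the current in-tree implementation.

#include <linux/kvm_host.h>

static struct kvm_cpuid_entry2 *
demo_find_cpuid_entry(struct kvm_cpuid_entry2 *entries, int nent,
		      u32 function, u32 index)
{
	int i;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *e = &entries[i];

		if (e->function != function)
			continue;
		/* Only honour the index when the entry says it matters. */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) ||
		    e->index == index)
			return e;
	}

	return NULL;
}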
cpuid.h
19 struct kvm_cpuid_entry2 __user *entries,
23 struct kvm_cpuid_entry __user *entries);
26 struct kvm_cpuid_entry2 __user *entries);
29 struct kvm_cpuid_entry2 __user *entries);
/arch/x86/mm/
mem_encrypt_identity.c
254 unsigned long entries = 0, tables = 0; in sme_pgtable_calc() local
271 entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D; in sme_pgtable_calc()
272 entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD; in sme_pgtable_calc()
273 entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD; in sme_pgtable_calc()
274 entries += 2 * sizeof(pte_t) * PTRS_PER_PTE; in sme_pgtable_calc()
282 tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D; in sme_pgtable_calc()
283 tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD; in sme_pgtable_calc()
284 tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD; in sme_pgtable_calc()
286 return entries + tables; in sme_pgtable_calc()
/arch/mips/generic/
yamon-dt.c
51 unsigned int entries = 0; in gen_fdt_mem_array() local
54 if (entries >= max_entries) { in gen_fdt_mem_array()
67 ++entries; in gen_fdt_mem_array()
72 return entries; in gen_fdt_mem_array()
/arch/sh/kernel/
stacktrace.c
34 trace->entries[trace->nr_entries++] = addr; in save_stack_address()
66 trace->entries[trace->nr_entries++] = addr; in save_stack_address_nosched()
/arch/x86/um/
ldt.c
73 if (copy_to_user(ptr, ldt->u.entries, size)) in read_ldt()
161 memcpy(&entry0, ldt->u.entries, in write_ldt()
175 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, in write_ldt()
185 ldt_p = ldt->u.entries + ldt_info.entry_number; in write_ldt()
339 memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, in init_new_ldt()
340 sizeof(new_mm->arch.ldt.u.entries)); in init_new_ldt()
/arch/openrisc/kernel/
stacktrace.c
39 trace->entries[trace->nr_entries++] = addr; in save_stack_address()
65 trace->entries[trace->nr_entries++] = addr; in save_stack_address_nosched()
/arch/arm/kernel/
unwind.c
61 int entries; /* number of entries left to interpret */ member
211 if (ctrl->entries <= 0) { in unwind_get_byte()
220 ctrl->entries--; in unwind_get_byte()
366 ctrl->entries = 0; in unwind_exec_insn()
444 ctrl.entries = 1; in unwind_frame()
447 ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); in unwind_frame()
456 while (ctrl.entries > 0) { in unwind_frame()
/arch/sparc/kernel/
stacktrace.c
58 trace->entries[trace->nr_entries++] = pc; in __save_stack_trace()
68 trace->entries[trace->nr_entries++] = pc; in __save_stack_trace()
/arch/arm64/kvm/hyp/nvhe/
trace.c
164 atomic_add(atomic_read(&new_tail->entries), &cpu_buffer->overrun); in rb_move_tail()
181 atomic_set(&new_tail->entries, 0); in rb_move_tail()
229 if (!atomic_read(&tail_page->entries)) { in rb_reserve_next()
238 atomic_inc(&tail_page->entries); in rb_reserve_next()
271 unsigned long entries, pages_touched, overrun; in rb_update_footers() local
281 entries = atomic_read(&cpu_buffer->nr_entries); in rb_update_footers()
282 footer->stats.entries = entries; in rb_update_footers()
/arch/mips/kernel/
stacktrace.c
30 trace->entries[trace->nr_entries++] = addr; in save_raw_context_stack()
58 trace->entries[trace->nr_entries++] = pc; in save_context_stack()
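The sh, openrisc, sparc and mips stacktrace.c hits all append into the legacy struct stack_trace interface those architectures still use: honour the skip count, then store each return address until max_entries is reached. A minimal sketch of that pattern; the helper name and the -ENOMEM stop value are illustrative.

#include <linux/errno.h>
#include <linux/stacktrace.h>

static int demo_save_stack_address(struct stack_trace *trace,
				   unsigned long addr)
{
	/* Skip the innermost frames the caller is not interested in. */
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -ENOMEM;	/* buffer full, stop walking */

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}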
