/arch/x86/kernel/cpu/

intel.c
    596  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    597  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    598  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    599  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    602  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    603  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    604  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    605  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    606  if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
    607  tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
    [all …]
/arch/x86/kernel/

ldt.c
    33  set_ldt(pc->ldt->entries, pc->ldt->size);  in flush_ldt()
    59  new_ldt->entries = vzalloc(alloc_size);  in alloc_ldt_struct()
    61  new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);  in alloc_ldt_struct()
    63  if (!new_ldt->entries) {  in alloc_ldt_struct()
    75  paravirt_alloc_ldt(ldt->entries, ldt->size);  in finalize_ldt_struct()
    94  paravirt_free_ldt(ldt->entries, ldt->size);  in free_ldt_struct()
    96  vfree(ldt->entries);  in free_ldt_struct()
    98  kfree(ldt->entries);  in free_ldt_struct()
    131  memcpy(new_ldt->entries, old_mm->context.ldt->entries,  in init_new_context()
    173  if (copy_to_user(ptr, mm->context.ldt->entries, size)) {  in read_ldt()
    [all …]
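The alloc_ldt_struct()/free_ldt_struct() matches above follow a common kernel pattern: a table that usually fits in one page comes from kzalloc(), anything larger falls back to vzalloc(), and the free path must mirror that choice. A minimal sketch of the pattern (error handling trimmed; alloc_size and the surrounding struct layout are assumed from the lines above, not quoted verbatim):

    /* Allocation side: pick the allocator by size. */
    if (alloc_size > PAGE_SIZE)
        new_ldt->entries = vzalloc(alloc_size);            /* large: vmalloc space */
    else
        new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); /* small: one page */
    if (!new_ldt->entries) {
        kfree(new_ldt);
        return NULL;
    }

    /* Free side: must match the allocator chosen above. */
    if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
        vfree(ldt->entries);
    else
        kfree(ldt->entries);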
stacktrace.c
    32  trace->entries[trace->nr_entries++] = addr;  in __save_stack_address()
    65  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
    73  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_regs()
    80  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
    114  trace->entries[trace->nr_entries++] = regs->ip;  in __save_stack_trace_user()
    126  trace->entries[trace->nr_entries++] =  in __save_stack_trace_user()
    144  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_user()
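The same two-step idiom repeats in nearly every arch stacktrace.c in this listing: a frame-walker callback appends return addresses to trace->entries, and the finished trace is terminated with ULONG_MAX when space remains. A sketch against the classic struct stack_trace API used by these pre-4.20 files (the callback name is illustrative):

    static int sketch_save_addr(unsigned long addr, struct stack_trace *trace)
    {
        if (trace->skip > 0) {          /* drop innermost frames on request */
            trace->skip--;
            return 0;
        }
        if (trace->nr_entries < trace->max_entries) {
            trace->entries[trace->nr_entries++] = addr;
            return 0;
        }
        return -ENOMEM;                 /* buffer full: stop the walk */
    }

    /* After the walk, mark the end if the buffer was not filled: */
    if (trace->nr_entries < trace->max_entries)
        trace->entries[trace->nr_entries++] = ULONG_MAX;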
/arch/x86/xen/

multicalls.c
    40  struct multicall_entry entries[MC_BATCH];  member
    80  mc = &b->entries[0];  in xen_mc_flush()
    90  memcpy(b->debug, b->entries,  in xen_mc_flush()
    94  if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)  in xen_mc_flush()
    97  if (b->entries[i].result < 0)  in xen_mc_flush()
    110  b->entries[i].result,  in xen_mc_flush()
    151  ret.mc = &b->entries[b->mcidx];  in __xen_mc_entry()
    172  b->entries[b->mcidx - 1].op != op)) {  in xen_mc_extend_args()
    182  ret.mc = &b->entries[b->mcidx - 1];  in xen_mc_extend_args()
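The multicalls.c matches outline Xen's hypercall batching: a per-CPU buffer collects up to MC_BATCH multicall_entry slots, and xen_mc_flush() submits the whole batch in a single HYPERVISOR_multicall(), then inspects each per-entry result. A condensed sketch of that flow (the buffer layout and error accounting are assumptions beyond what the matches show):

    struct mc_buffer {
        struct multicall_entry entries[MC_BATCH];
        unsigned mcidx;                         /* slots currently in use */
    };

    /* Reserve a slot, flushing first if the batch is already full. */
    if (b->mcidx == MC_BATCH)
        xen_mc_flush();
    mc = &b->entries[b->mcidx++];
    /* ...caller fills mc->op and mc->args... */

    /* Flush: one hypercall covers the whole batch. */
    if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
        BUG();
    for (i = 0; i < b->mcidx; i++)
        if (b->entries[i].result < 0)
            ret++;                              /* count failed sub-calls */
    b->mcidx = 0;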
/arch/sh/kernel/

stacktrace.c
    41  trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
    55  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
    76  trace->entries[trace->nr_entries++] = addr;  in save_stack_address_nosched()
    90  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
/arch/parisc/kernel/

stacktrace.c
    41  trace->entries[trace->nr_entries++] = info.ip;  in dump_trace()
    53  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
    61  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()

inventory.c
    293  int entries;  in pat_memconfig()  local
    315  entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);  in pat_memconfig()
    317  if (entries > PAT_MAX_RANGES) {  in pat_memconfig()
    330  for (i = 0; i < entries; i++,mtbl_ptr++) {  in pat_memconfig()
    387  int entries;  in sprockets_memconfig()  local
    410  entries = (int)r_addr.entries_returned;  in sprockets_memconfig()
    415  for (i = 0; i < entries; i++,mtbl_ptr++) {  in sprockets_memconfig()
/arch/arm/kernel/

stacktrace.c
    84  trace->entries[trace->nr_entries++] = addr;  in save_trace()
    103  trace->entries[trace->nr_entries++] = regs->ARM_pc;  in save_trace()
    128  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
    149  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
    168  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_regs()

unwind.c
    77  int entries;  /* number of entries left to interpret */  member
    227  if (ctrl->entries <= 0) {  in unwind_get_byte()
    236  ctrl->entries--;  in unwind_get_byte()
    355  ctrl->entries = 0;  in unwind_exec_insn()
    433  ctrl.entries = 1;  in unwind_frame()
    436  ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);  in unwind_frame()
    445  while (ctrl.entries > 0) {  in unwind_frame()
/arch/x86/kvm/

cpuid.h
    10  struct kvm_cpuid_entry2 __user *entries,
    14  struct kvm_cpuid_entry __user *entries);
    17  struct kvm_cpuid_entry2 __user *entries);
    20  struct kvm_cpuid_entry2 __user *entries);
/arch/arm/mm/

proc-arm940.S
    115  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    118  bcs 2b  @ entries 63 to 0
    165  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    168  bcs 2b  @ entries 63 to 0
    187  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    190  bcs 2b  @ entries 63 to 0
    210  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    213  bcs 2b  @ entries 63 to 0
    232  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26
    240  bcs 2b  @ entries 63 to 0
/arch/sh/mm/

tlb-sh5.c
    23  cpu_data->dtlb.entries = 64;  in sh64_tlb_init()
    30  ((cpu_data->dtlb.entries - 1) *  in sh64_tlb_init()
    34  cpu_data->itlb.entries = 64;  in sh64_tlb_init()
    40  ((cpu_data->itlb.entries - 1) *  in sh64_tlb_init()
/arch/powerpc/kvm/

e500_mmu_host.c
    40  #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
    47  return host_tlb_params[1].entries - tlbcam_index - 1;  in tlb1_max_shadow_size()
    277  sizeof(u64) * vcpu_e500->gtlb_params[1].entries);  in clear_tlb1_bitmap()
    280  sizeof(unsigned int) * host_tlb_params[1].entries);  in clear_tlb1_bitmap()
    289  for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {  in clear_tlb_privs()
    755  host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;  in e500_mmu_host_init()
    756  host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;  in e500_mmu_host_init()
    763  if (host_tlb_params[0].entries == 0 ||  in e500_mmu_host_init()
    764  host_tlb_params[1].entries == 0) {  in e500_mmu_host_init()
    771  host_tlb_params[1].ways = host_tlb_params[1].entries;  in e500_mmu_host_init()
    [all …]
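Lines 755-764 show how the host learns its TLB geometry: the entry count is the low field of the TLBnCFG SPRs, masked out with TLBnCFG_N_ENTRY and sanity-checked before the shadow arrays are sized. Condensed from the matches above (the error path is a paraphrase, not a quote):

    host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
    host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
    if (host_tlb_params[0].entries == 0 ||
        host_tlb_params[1].entries == 0) {
        pr_err("%s: unable to determine host TLB size\n", __func__);
        return -ENODEV;
    }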
e500_mmu.c
    76  esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;  in get_tlb_esel()
    86  int size = vcpu_e500->gtlb_params[tlbsel].entries;  in kvmppc_e500_tlb_index()
    156  int size = vcpu_e500->gtlb_params[1].entries;  in kvmppc_recalc_tlb1map_range()
    236  for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)  in kvmppc_e500_emul_mt_mmucsr0()
    239  for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)  in kvmppc_e500_emul_mt_mmucsr0()
    261  for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;  in kvmppc_e500_emul_tlbivax()
    285  for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {  in tlbilx_all()
    828  vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];  in kvm_vcpu_ioctl_config_tlb()
    829  vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];  in kvm_vcpu_ioctl_config_tlb()
    881  vcpu->arch.tlbcfg[0] |= params[0].entries;  in vcpu_mmu_init()
    [all …]
/arch/s390/kernel/

stacktrace.c
    31  trace->entries[trace->nr_entries++] = addr;  in save_context_stack()
    52  trace->entries[trace->nr_entries++] = addr;  in save_context_stack()
    94  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
/arch/x86/include/asm/

setup.h
    103  #define RESERVE_BRK_ARRAY(type, name, entries) \  argument
    105  RESERVE_BRK(name, sizeof(type) * entries)
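RESERVE_BRK_ARRAY simply scales RESERVE_BRK by the element size, reserving early brk-section storage for an array at compile time. A hypothetical use (the type and variable names here are invented for illustration):

    /* Reserve brk space for 64 elements of a made-up entry type. */
    RESERVE_BRK_ARRAY(struct early_map_entry, early_map_table, 64);
    /* expands to: RESERVE_BRK(early_map_table, sizeof(struct early_map_entry) * 64) */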
mmu_context.h
    33  struct desc_struct *entries;  member
    59  set_ldt(ldt->entries, ldt->size);  in load_mm_ldt()
/arch/um/kernel/

stacktrace.c
    56  trace->entries[trace->nr_entries++] = address;  in save_addr()
    67  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
/arch/sparc/kernel/

stacktrace.c
    56  trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
    64  trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
/arch/mips/kernel/

stacktrace.c
    26  trace->entries[trace->nr_entries++] = addr;  in save_raw_context_stack()
    53  trace->entries[trace->nr_entries++] = pc;  in save_context_stack()
/arch/metag/kernel/

stacktrace.c
    137  trace->entries[trace->nr_entries++] = addr;  in save_trace()
    158  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
    179  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
/arch/x86/um/

ldt.c
    156  if (copy_to_user(ptr, ldt->u.entries, size))  in read_ldt()
    250  memcpy(&entry0, ldt->u.entries,  in write_ldt()
    264  memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,  in write_ldt()
    274  ldt_p = ldt->u.entries + ldt_info.entry_number;  in write_ldt()
    462  memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,  in init_new_ldt()
    463  sizeof(new_mm->arch.ldt.u.entries));  in init_new_ldt()
/arch/arm64/kernel/

stacktrace.c
    91  trace->entries[trace->nr_entries++] = addr;  in save_trace()
    118  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
/arch/powerpc/sysdev/

fsl_rio.h
    127  void *dev_id, int mbox, int entries);
    130  void *dev_id, int mbox, int entries);
/arch/unicore32/kernel/

stacktrace.c
    92  trace->entries[trace->nr_entries++] = addr;  in save_trace()
    123  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()