/arch/blackfin/kernel/cplb-nompu/ |
D | cplbinit.c |
    101  dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;   in generate_cplb_tables_all()
    103  dcplb_bounds[i_d].eaddr = memory_end;   in generate_cplb_tables_all()
    108  dcplb_bounds[i_d].eaddr = _ramend;   in generate_cplb_tables_all()
    113  dcplb_bounds[i_d].eaddr = physical_mem_end;   in generate_cplb_tables_all()
    118  dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;   in generate_cplb_tables_all()
    121  dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;   in generate_cplb_tables_all()
    124  dcplb_bounds[i_d].eaddr = BOOT_ROM_START;   in generate_cplb_tables_all()
    127  dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);   in generate_cplb_tables_all()
    131  dcplb_bounds[i_d].eaddr = L2_START;   in generate_cplb_tables_all()
    134  dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;   in generate_cplb_tables_all()
    [all …]
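The fragments above show only the eaddr assignments; taken together they fill a dcplb_bounds[] table in which each entry records the end address of one region (SDRAM, async banks, boot ROM, L2), so a region implicitly starts where the previous entry ends. A minimal user-space sketch of that table layout, with invented addresses and attribute values rather than the real Blackfin memory map:

/* Sketch only: each entry stores just the end address (eaddr) of a region,
 * so region i runs from bounds[i-1].eaddr (or 0) up to bounds[i].eaddr.
 * The values below are made-up stand-ins for memory_end, ASYNC_BANK*, etc.
 */
#include <stdio.h>

struct cplb_boundary {
    unsigned long eaddr;   /* End of this region. */
    unsigned long data;    /* CPLB attributes for the region. */
};

int main(void)
{
    struct cplb_boundary bounds[] = {
        { 0x04000000, 0x1 },   /* hypothetical end of SDRAM       */
        { 0x20000000, 0x0 },   /* hole, not covered               */
        { 0x20400000, 0x2 },   /* hypothetical end of async banks */
    };
    unsigned long base = 0;

    for (unsigned i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++) {
        printf("region %u: 0x%08lx-0x%08lx data 0x%lx\n",
               i, base, bounds[i].eaddr, bounds[i].data);
        base = bounds[i].eaddr;   /* next region starts where this one ends */
    }
    return 0;
}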
D | cplbmgr.c |
    159  unsigned long i_data, base, addr1, eaddr;   in icplb_miss()  local
    168  eaddr = icplb_bounds[idx].eaddr;   in icplb_miss()
    169  if (addr < eaddr)   in icplb_miss()
    171  base = eaddr;   in icplb_miss()
    184  if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {   in icplb_miss()
    206  unsigned long d_data, base, addr1, eaddr;   in dcplb_miss()  local
    215  eaddr = dcplb_bounds[idx].eaddr;   in dcplb_miss()
    216  if (addr < eaddr)   in dcplb_miss()
    218  base = eaddr;   in dcplb_miss()
    231  if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {   in dcplb_miss()
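The icplb_miss()/dcplb_miss() fragments suggest the lookup pattern: scan the sorted bounds table until the faulting address falls below an entry's eaddr, carrying the previous eaddr along as the region base, then test whether a 4 MB page starting at the aligned address still fits inside the region. A runnable sketch of that scan under those assumptions; the table contents are made up and the real handlers also pick CPLB data and handle uncovered addresses:

#include <stdio.h>

#define SIZE_4M (4UL * 1024 * 1024)

struct bound { unsigned long eaddr, data; };

static const struct bound bounds[] = {
    { 0x02000000, 0x1 },
    { 0x04000000, 0x2 },
    { 0x20000000, 0x0 },
};

/* Walk the sorted eaddr table; the previous end address is the base of the
 * region that covers addr.  Mirrors the loop shape visible above. */
static int lookup(unsigned long addr, unsigned long *base_out, unsigned long *eaddr_out)
{
    unsigned long base = 0;

    for (unsigned idx = 0; idx < sizeof(bounds) / sizeof(bounds[0]); idx++) {
        unsigned long eaddr = bounds[idx].eaddr;

        if (addr < eaddr) {        /* found the covering region */
            *base_out = base;
            *eaddr_out = eaddr;
            return idx;
        }
        base = eaddr;              /* next region starts here */
    }
    return -1;                     /* address above every region */
}

int main(void)
{
    unsigned long addr = 0x02345678, base, eaddr;
    int idx = lookup(addr, &base, &eaddr);

    if (idx < 0)
        return 1;

    /* Same shape as the 4MB-page test on lines 184/231. */
    unsigned long addr1 = addr & ~(SIZE_4M - 1);
    printf("region %d, 4MB page %s\n", idx,
           (addr1 >= base && addr1 + SIZE_4M <= eaddr) ? "fits" : "does not fit");
    return 0;
}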
/arch/sh/mm/ |
D | cache-sh5.c |
    34   #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr) do { } while (0)   argument
    37   #define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0)   argument
    46   sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,   in sh64_setup_dtlb_cache_slot()  argument
    50   sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);   in sh64_setup_dtlb_cache_slot()
    100  static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)   in sh64_icache_inv_user_page()  argument
    108  addr = eaddr;   in sh64_icache_inv_user_page()
    172  unsigned long eaddr;   in sh64_icache_inv_user_page_range()  local
    201  eaddr = aligned_start;   in sh64_icache_inv_user_page_range()
    202  while (eaddr < vma_end) {   in sh64_icache_inv_user_page_range()
    203  sh64_icache_inv_user_page(vma, eaddr);   in sh64_icache_inv_user_page_range()
    [all …]
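Lines 201-203 show sh64_icache_inv_user_page_range() stepping eaddr across the affected range and invalidating one page per iteration, presumably advancing by PAGE_SIZE (the increment is not visible in the fragment). A user-space model of that loop, with PAGE_SIZE and the per-page hook as stand-ins for the kernel's definitions:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* stand-in page size */

/* Stand-in for sh64_icache_inv_user_page(vma, eaddr). */
static void invalidate_one_page(unsigned long eaddr)
{
    printf("invalidate icache for page at 0x%lx\n", eaddr);
}

int main(void)
{
    unsigned long aligned_start = 0x400000, vma_end = 0x403000;

    /* eaddr walks page by page from the aligned start to the end of the VMA. */
    for (unsigned long eaddr = aligned_start; eaddr < vma_end; eaddr += PAGE_SIZE)
        invalidate_one_page(eaddr);
    return 0;
}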
D | tlb-sh5.c |
    121  unsigned long eaddr,   in sh64_setup_tlb_slot()  argument
    129  pteh = (unsigned long long)(signed long long)(signed long) eaddr;   in sh64_setup_tlb_slot()
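The cast chain on line 129 sign-extends the effective address into the 64-bit PTEH value (assuming long is 32 bits wide, as on the sh64 kernel ABI). The same effect, modelled with fixed-width types so it behaves identically on any host:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eaddr = 0xc0001000;   /* example kernel-space effective address */

    /* Go through a signed 32-bit type first so bit 31 is replicated into
     * the upper half of the 64-bit value, as the kernel cast chain does. */
    uint64_t pteh = (uint64_t)(int64_t)(int32_t)eaddr;

    printf("pteh = 0x%016llx\n", (unsigned long long)pteh);
    /* prints 0xffffffffc0001000: the address is sign-extended */
    return 0;
}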
/arch/powerpc/kvm/ |
D | 44x_tlb.h |
    26  extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
    28  extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    29  extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    88  static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr)   in tlb_xlate()  argument
    92  return get_tlb_raddr(tlbe) | (eaddr & pgmask);   in tlb_xlate()
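tlb_xlate() on lines 88-92 builds a guest physical address by OR-ing the page-aligned real address from the TLB entry with the page-offset bits of the effective address. A tiny standalone illustration, assuming a 4 KB page where the real code derives pgmask from the entry's page-size field:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t raddr  = 0x12345000;   /* page-aligned real address from the TLB entry */
    uint32_t eaddr  = 0xdeadbeef;   /* guest effective address */
    uint32_t pgmask = 0xfff;        /* offset mask for an assumed 4KB page */

    /* Keep the page-frame bits from the entry, the offset bits from eaddr. */
    uint64_t gpaddr = raddr | (eaddr & pgmask);

    printf("gpaddr = 0x%llx\n", (unsigned long long)gpaddr);   /* 0x12345eef */
    return 0;
}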
D | 44x_tlb.c |
    178  int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,   in kvmppc_44x_tlb_index()  argument
    189  if (eaddr < get_tlb_eaddr(tlbe))   in kvmppc_44x_tlb_index()
    192  if (eaddr > get_tlb_end(tlbe))   in kvmppc_44x_tlb_index()
    211  int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)   in kvmppc_44x_itlb_index()  argument
    215  return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);   in kvmppc_44x_itlb_index()
    218  int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)   in kvmppc_44x_dtlb_index()
    222  return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);   in kvmppc_44x_dtlb_index()
    452  gva_t eaddr;   in kvmppc_44x_emul_tlbwe()  local
    457  eaddr = get_tlb_eaddr(tlbe);   in kvmppc_44x_emul_tlbwe()
    462  eaddr &= ~(bytes - 1);   in kvmppc_44x_emul_tlbwe()
    [all …]
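Lines 189 and 192 show the core of kvmppc_44x_tlb_index(): an entry can only match when eaddr lies between the entry's start and end addresses. A simplified, runnable model of that search; the struct and helpers are stand-ins, and the PID/AS checks the real function also performs are reduced to a single valid flag:

#include <stdio.h>
#include <stdint.h>

struct tlbe { uint32_t start, end; int valid; };

static int tlb_index(const struct tlbe *tlb, int n, uint32_t eaddr)
{
    for (int i = 0; i < n; i++) {
        if (!tlb[i].valid)
            continue;
        if (eaddr < tlb[i].start)   /* below this mapping */
            continue;
        if (eaddr > tlb[i].end)     /* above this mapping */
            continue;
        return i;                   /* hit */
    }
    return -1;                      /* guest TLB miss */
}

int main(void)
{
    struct tlbe tlb[] = {
        { 0x00000000, 0x00000fff, 1 },
        { 0x10000000, 0x100fffff, 1 },
    };

    printf("index = %d\n", tlb_index(tlb, 2, 0x10001234));   /* prints 1 */
    return 0;
}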
D | 44x.c |
    155  gva_t eaddr;   in kvmppc_core_vcpu_translate()  local
    159  eaddr = tr->linear_address;   in kvmppc_core_vcpu_translate()
    163  index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);   in kvmppc_core_vcpu_translate()
    171  tr->physical_address = tlb_xlate(gtlbe, eaddr);   in kvmppc_core_vcpu_translate()
D | booke.c |
    291  unsigned long eaddr = vcpu->arch.fault_dear;   in kvmppc_handle_exit()  local
    296  gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);   in kvmppc_handle_exit()
    308  vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);   in kvmppc_handle_exit()
    318  kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,   in kvmppc_handle_exit()
    336  unsigned long eaddr = vcpu->arch.pc;   in kvmppc_handle_exit()  local
    344  gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr);   in kvmppc_handle_exit()
    355  gpaddr = tlb_xlate(gtlbe, eaddr);   in kvmppc_handle_exit()
    365  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,   in kvmppc_handle_exit()
/arch/arm/mm/ |
D | alignment.c |
    424  unsigned long eaddr, newaddr;   in do_alignment_ldmstm()  local
    438  newaddr = eaddr = regs->uregs[rn];   in do_alignment_ldmstm()
    444  eaddr = newaddr;   in do_alignment_ldmstm()
    447  eaddr += 4;   in do_alignment_ldmstm()
    461  if (addr != eaddr) {   in do_alignment_ldmstm()
    464  instruction_pointer(regs), instr, addr, eaddr);   in do_alignment_ldmstm()
    475  get32t_unaligned_check(val, eaddr);   in do_alignment_ldmstm()
    478  put32t_unaligned_check(regs->uregs[rd], eaddr);   in do_alignment_ldmstm()
    479  eaddr += 4;   in do_alignment_ldmstm()
    487  get32_unaligned_check(val, eaddr);   in do_alignment_ldmstm()
    [all …]
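In do_alignment_ldmstm() the base register supplies the starting eaddr, and each register named in the LDM/STM register list advances it by 4 while one word is transferred with the unaligned-access helpers (lines 438, 447, 475-479). A user-space model of that walk, using memcpy in place of get32_unaligned_check()/put32_unaligned_check() and a made-up register list:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    unsigned char mem[64];
    uint32_t regs[16] = { 0 };
    unsigned int reglist = 0x000e;   /* hypothetical list: transfer r1-r3 */
    unsigned long eaddr = 1;         /* deliberately unaligned base address */

    memset(mem, 0xab, sizeof(mem));

    for (unsigned rd = 0; rd < 16; rd++) {
        if (!(reglist & (1u << rd)))
            continue;
        memcpy(&regs[rd], &mem[eaddr], 4);   /* "LDM": load one word, any alignment */
        eaddr += 4;                          /* next transfer address */
    }

    printf("r1=0x%08x, final eaddr=%lu\n", regs[1], eaddr);
    return 0;
}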
/arch/blackfin/include/asm/ |
D | cplbinit.h | 48 unsigned long eaddr; /* End of this region. */ member
/arch/sh/include/asm/ |
D | tlb_64.h | 59 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
/arch/mips/include/asm/ |
D | sgialib.h | 109 extern long prom_load(char *name, unsigned long end, unsigned long *pc, unsigned long *eaddr);
/arch/ia64/mm/ |
D | init.c |
    156  unsigned long addr, eaddr;   in free_initmem()  local
    159  eaddr = (unsigned long) ia64_imva(__init_end);   in free_initmem()
    160  while (addr < eaddr) {   in free_initmem()
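free_initmem() computes eaddr from __init_end and releases every page from the start of the init section up to it. A sketch of that loop structure with stand-in addresses and a stand-in free routine (the real code resolves the section bounds through ia64_imva() and hands pages back to the page allocator):

#include <stdio.h>

#define PAGE_SIZE 16384UL   /* ia64 commonly uses 16KB pages */

/* Stand-in for releasing one init page back to the allocator. */
static void free_one_page(unsigned long addr)
{
    printf("freeing init page at 0x%lx\n", addr);
}

int main(void)
{
    unsigned long addr  = 0x200000UL;              /* stand-in for __init_begin */
    unsigned long eaddr = addr + 3 * PAGE_SIZE;    /* stand-in for __init_end */

    while (addr < eaddr) {
        free_one_page(addr);
        addr += PAGE_SIZE;
    }
    return 0;
}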
/arch/mips/txx9/generic/ |
D | setup.c |
    411  unsigned long eaddr = __pa_symbol(&_text);   in prom_free_prom_memory()  local
    413  if (saddr < eaddr)   in prom_free_prom_memory()
    414  free_init_pages("prom memory", saddr, eaddr);   in prom_free_prom_memory()