Searched refs:mmu (Results 1 – 25 of 157) sorted by relevance

/arch/x86/kernel/
paravirt.c
232 .mmu.flush_tlb_user = native_flush_tlb_local,
233 .mmu.flush_tlb_kernel = native_flush_tlb_global,
234 .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
235 .mmu.flush_tlb_multi = native_flush_tlb_multi,
236 .mmu.tlb_remove_table = native_tlb_remove_table,
238 .mmu.exit_mmap = paravirt_nop,
239 .mmu.notify_page_enc_status_changed = paravirt_nop,
242 .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
243 .mmu.write_cr2 = pv_native_write_cr2,
244 .mmu.read_cr3 = __native_read_cr3,
[all …]
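
pv_ops is a table of function pointers: on bare metal every .mmu hook points at a native_* implementation, and a hypervisor backend overrides entries one by one. A minimal sketch of the same ops-table pattern, using invented names rather than the kernel's:

/* Sketch of an ops-table with native defaults; nothing here is a kernel API. */
struct mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_one_user)(unsigned long addr);
};

static void native_flush_user(void) { /* e.g. rewrite CR3 with itself */ }
static void native_flush_one(unsigned long addr) { (void)addr; /* e.g. invlpg */ }

static struct mmu_ops mmu_ops = {
        .flush_tlb_user     = native_flush_user,
        .flush_tlb_one_user = native_flush_one,
};

/* Callers dispatch through the table, so a hypervisor backend can
 * replace individual entries, as pv_ops.mmu does above. */
static inline void flush_tlb_one_user(unsigned long addr)
{
        mmu_ops.flush_tlb_one_user(addr);
}
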
/arch/arm64/kvm/hyp/nvhe/
tlb.c
14 struct kvm_s2_mmu *mmu; member
19 static void enter_vmid_context(struct kvm_s2_mmu *mmu, in enter_vmid_context() argument
23 struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu; in enter_vmid_context()
29 cxt->mmu = NULL; in enter_vmid_context()
59 if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
62 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
65 if (mmu == host_s2_mmu) in enter_vmid_context()
68 cxt->mmu = host_s2_mmu; in enter_vmid_context()
110 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in enter_vmid_context()
117 struct kvm_s2_mmu *mmu = cxt->mmu; in exit_vmid_context() local
[all …]
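
enter_vmid_context() records the MMU it had to switch away from in cxt->mmu, leaving it NULL when no switch was needed, so exit_vmid_context() knows whether there is anything to restore. A sketch of that save/switch/restore shape; every name below is a stand-in, not a kernel API:

/* Illustrative save/switch/restore pattern. */
struct s2_mmu;
struct tlb_ctx { struct s2_mmu *mmu; };

extern void load_vmid(struct s2_mmu *mmu);  /* assumed: programs VTTBR */

static void enter_ctx(struct s2_mmu *target, struct s2_mmu *cur,
                      struct tlb_ctx *cxt)
{
        cxt->mmu = NULL;            /* default: nothing to restore */
        if (target == cur)
                return;             /* already in the right context */
        cxt->mmu = cur;             /* remember what to switch back to */
        load_vmid(target);
}

static void exit_ctx(struct tlb_ctx *cxt)
{
        if (cxt->mmu)               /* only restore if enter_ctx switched */
                load_vmid(cxt->mmu);
}
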
/arch/arc/mm/
tlb.c
136 struct cpuinfo_arc_mmu *mmu = &mmuinfo; in local_flush_tlb_all() local
139 int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
571 struct cpuinfo_arc_mmu *mmu = &mmuinfo; in arc_mmu_mumbojumbo() local
579 mmu->ver = (bcr >> 24); in arc_mmu_mumbojumbo()
581 if (is_isa_arcompact() && mmu->ver == 3) { in arc_mmu_mumbojumbo()
583 mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); in arc_mmu_mumbojumbo()
584 mmu->sets = 1 << mmu3->sets; in arc_mmu_mumbojumbo()
585 mmu->ways = 1 << mmu3->ways; in arc_mmu_mumbojumbo()
591 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); in arc_mmu_mumbojumbo()
592 mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); in arc_mmu_mumbojumbo()
[all …]
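
arc_mmu_mumbojumbo() reads the MMU build configuration register and expands its log2-encoded fields; sets * ways then gives the TLB entry count used by local_flush_tlb_all(). A sketch of that decode style, with a register layout invented for illustration:

#include <stdio.h>

/* Hypothetical BCR layout: version in bits 31:24, log2(sets) in 7:4,
 * log2(ways) in 3:0 -- invented for illustration. */
struct mmu_info { unsigned ver, sets, ways; };

static void decode_bcr(unsigned bcr, struct mmu_info *mmu)
{
        mmu->ver  = bcr >> 24;
        mmu->sets = 1u << ((bcr >> 4) & 0xf);  /* field stores log2(sets) */
        mmu->ways = 1u << (bcr & 0xf);         /* field stores log2(ways) */
}

int main(void)
{
        struct mmu_info mmu;

        decode_bcr(0x03000042u, &mmu);
        /* total TLB entries, as local_flush_tlb_all() computes: sets * ways */
        printf("MMU v%u: %u sets x %u ways = %u entries\n",
               mmu.ver, mmu.sets, mmu.ways, mmu.sets * mmu.ways);
        return 0;
}
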
/arch/arm64/kvm/hyp/vhe/
tlb.c
19 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
56 __load_stage2(mmu, mmu->arch); in __tlb_switch_to_guest()
82 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
90 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
114 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa_nsh() argument
122 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa_nsh()
146 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
162 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_range()
174 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
181 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid()
[all …]
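
The VHE flush helpers all follow one bracket: switch to the target VMID, issue the invalidation, switch back. Continuing the stand-in names from the sketch above, with tlbi_by_ipa() standing in for the real barrier/TLBI sequence:

extern void tlbi_by_ipa(unsigned long ipa);  /* assumed stand-in */

static void flush_vmid_ipa(struct s2_mmu *mmu, struct s2_mmu *cur,
                           unsigned long ipa)
{
        struct tlb_ctx cxt;

        enter_ctx(mmu, cur, &cxt);  /* run under the target VMID */
        tlbi_by_ipa(ipa);           /* the actual invalidation */
        exit_ctx(&cxt);             /* back to where we were */
}
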
/arch/x86/include/asm/
paravirt.h
71 PVOP_VCALL0(mmu.flush_tlb_user); in __flush_tlb_local()
76 PVOP_VCALL0(mmu.flush_tlb_kernel); in __flush_tlb_global()
81 PVOP_VCALL1(mmu.flush_tlb_one_user, addr); in __flush_tlb_one_user()
87 PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info); in __flush_tlb_multi()
92 PVOP_VCALL2(mmu.tlb_remove_table, tlb, table); in paravirt_tlb_remove_table()
97 PVOP_VCALL1(mmu.exit_mmap, mm); in paravirt_arch_exit_mmap()
103 PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); in notify_page_enc_status_changed()
144 return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2, in read_cr2()
151 PVOP_VCALL1(mmu.write_cr2, x); in write_cr2()
156 return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3, in __read_cr3()
[all …]
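
Each PVOP_VCALLn macro takes a member path such as mmu.flush_tlb_user as a token and expands to an indirect call through pv_ops with n arguments. A grossly simplified sketch of that dispatch; the kernel's real macros additionally emit patchable call sites:

struct pv_mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_one_user)(unsigned long addr);
};
struct pv_ops_t { struct pv_mmu_ops mmu; };

extern struct pv_ops_t pv_ops;

#define PVOP_VCALL0(op)      pv_ops.op()
#define PVOP_VCALL1(op, a1)  pv_ops.op(a1)

static inline void my_flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
        /* expands to: pv_ops.mmu.flush_tlb_one_user(addr); */
}
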
/arch/x86/kvm/mmu/
paging_tmpl.h
31 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
45 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
58 #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled) argument
109 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, in FNAME()
115 if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
145 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
147 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) || in FNAME()
148 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
159 if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && in FNAME()
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
[all …]
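
paging_tmpl.h is a C template: it is #included several times with PTTYPE set differently each time, and FNAME() pastes the variant into every function name, so a single body yields the 32-bit, 64-bit, and EPT guest walkers. A minimal sketch of the include-as-template trick, with hypothetical file and function names:

/* walker_tmpl.h -- included once per PTTYPE (hypothetical example) */
#if PTTYPE == 64
#define FNAME(name) walker64_##name
#elif PTTYPE == 32
#define FNAME(name) walker32_##name
#endif

static int FNAME(walk)(unsigned long gva)
{
        (void)gva;
        return PTTYPE;          /* one body, specialized per include */
}

#undef FNAME

/* user.c:
 *   #define PTTYPE 64
 *   #include "walker_tmpl.h"   -- emits walker64_walk()
 *   #undef PTTYPE
 *   #define PTTYPE 32
 *   #include "walker_tmpl.h"   -- emits walker32_walk()
 */
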
mmu.c
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
226 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
237 static inline bool is_cr0_pg(struct kvm_mmu *mmu) in is_cr0_pg() argument
239 return mmu->cpu_role.base.level > 0; in is_cr0_pg()
242 static inline bool is_cr4_pae(struct kvm_mmu *mmu) in is_cr4_pae() argument
244 return !mmu->cpu_role.base.has_4_byte_gpte; in is_cr4_pae()
264 struct kvm_mmu *mmu) in kvm_mmu_get_guest_pgd() argument
266 if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) in kvm_mmu_get_guest_pgd()
269 return mmu->get_guest_pgd(vcpu); in kvm_mmu_get_guest_pgd()
646 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; in is_tdp_mmu_active()
[all …]
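
Lines 224-226 are the body of an accessor-building macro (BUILD_MMU_ROLE_ACCESSOR in the source) that stamps out one is_<reg>_<bit>() helper per tracked role bit; is_cr0_pg() and is_cr4_pae() are hand-written because they derive their answers from other role fields. A sketch of that generator, over a role layout invented for illustration:

#include <stdbool.h>

struct mmu {
        struct {
                struct { unsigned cr0_wp : 1; unsigned cr4_smep : 1; } ext;
        } cpu_role;
};

#define BUILD_ROLE_ACCESSOR(reg, name)                          \
static inline bool is_##reg##_##name(struct mmu *mmu)           \
{                                                               \
        return !!(mmu->cpu_role.ext.reg##_##name);              \
}

BUILD_ROLE_ACCESSOR(cr0, wp)    /* generates is_cr0_wp()   */
BUILD_ROLE_ACCESSOR(cr4, smep)  /* generates is_cr4_smep() */
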
/arch/x86/kvm/
mmu.h
117 struct kvm_mmu *mmu);
129 if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE)) in kvm_mmu_reload()
151 u64 root_hpa = vcpu->arch.mmu->root.hpa; in kvm_mmu_load_pgd()
157 vcpu->arch.mmu->root_role.level); in kvm_mmu_load_pgd()
161 struct kvm_mmu *mmu) in kvm_mmu_refresh_passthrough_bits() argument
172 if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu) in kvm_mmu_refresh_passthrough_bits()
175 __kvm_mmu_refresh_passthrough_bits(vcpu, mmu); in kvm_mmu_refresh_passthrough_bits()
186 static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument
212 kvm_mmu_refresh_passthrough_bits(vcpu, mmu); in permission_fault()
214 fault = (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
[all …]
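
permission_fault() recomputes nothing per access: mmu->permissions[] is filled ahead of time from control-register state, indexed by the fault error code, and each entry is a bitmask over the possible pte_access values, so the hot path is one shift and an AND. A toy version of the precomputed-bitmap idea, with invented sizes and encodings:

#include <stdint.h>
#include <stdbool.h>

/* Toy fault table: 4 fault-type indices and 8 invented pte_access
 * encodings (bit 0 = writable). Bit a of permissions[i] is set when
 * access pattern a faults for fault type i. */
static uint8_t permissions[4];

static void precompute(bool write_protect)
{
        for (int index = 0; index < 4; index++) {
                for (unsigned a = 0; a < 8; a++) {
                        bool writable = a & 1;       /* invented encoding */
                        bool is_write = index & 1;   /* invented encoding */

                        if (is_write && !writable && write_protect)
                                permissions[index] |= 1u << a;
                }
        }
}

/* Hot path: one shift and an AND, like permission_fault() above. */
static inline bool faults(int index, unsigned pte_access)
{
        return (permissions[index] >> pte_access) & 1;
}
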
Makefile
14 hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
15 mmu/spte.o
21 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
/arch/um/kernel/skas/
mmu.c
62 struct mm_context *mmu = &mm->context; in destroy_context() local
70 if (mmu->id.u.pid < 2) { in destroy_context()
72 mmu->id.u.pid); in destroy_context()
75 os_kill_ptraced_process(mmu->id.u.pid, 1); in destroy_context()
77 free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES)); in destroy_context()
78 free_ldt(mmu); in destroy_context()
/arch/powerpc/kvm/
book3s_32_mmu.c
399 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_32_init() local
401 mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; in kvmppc_mmu_book3s_32_init()
402 mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; in kvmppc_mmu_book3s_32_init()
403 mmu->xlate = kvmppc_mmu_book3s_32_xlate; in kvmppc_mmu_book3s_32_init()
404 mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; in kvmppc_mmu_book3s_32_init()
405 mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; in kvmppc_mmu_book3s_32_init()
406 mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; in kvmppc_mmu_book3s_32_init()
407 mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; in kvmppc_mmu_book3s_32_init()
409 mmu->slbmte = NULL; in kvmppc_mmu_book3s_32_init()
410 mmu->slbmfee = NULL; in kvmppc_mmu_book3s_32_init()
[all …]
book3s_64_mmu.c
653 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_64_init() local
655 mmu->mfsrin = NULL; in kvmppc_mmu_book3s_64_init()
656 mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; in kvmppc_mmu_book3s_64_init()
657 mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; in kvmppc_mmu_book3s_64_init()
658 mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; in kvmppc_mmu_book3s_64_init()
659 mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; in kvmppc_mmu_book3s_64_init()
660 mmu->slbfee = kvmppc_mmu_book3s_64_slbfee; in kvmppc_mmu_book3s_64_init()
661 mmu->slbie = kvmppc_mmu_book3s_64_slbie; in kvmppc_mmu_book3s_64_init()
662 mmu->slbia = kvmppc_mmu_book3s_64_slbia; in kvmppc_mmu_book3s_64_init()
663 mmu->xlate = kvmppc_mmu_book3s_64_xlate; in kvmppc_mmu_book3s_64_init()
[all …]
book3s_emulate.c
317 if (vcpu->arch.mmu.mfsrin) { in kvmppc_core_emulate_op_pr()
319 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); in kvmppc_core_emulate_op_pr()
329 if (vcpu->arch.mmu.mfsrin) { in kvmppc_core_emulate_op_pr()
331 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); in kvmppc_core_emulate_op_pr()
337 vcpu->arch.mmu.mtsrin(vcpu, in kvmppc_core_emulate_op_pr()
342 vcpu->arch.mmu.mtsrin(vcpu, in kvmppc_core_emulate_op_pr()
351 vcpu->arch.mmu.tlbie(vcpu, addr, large); in kvmppc_core_emulate_op_pr()
385 if (!vcpu->arch.mmu.slbmte) in kvmppc_core_emulate_op_pr()
388 vcpu->arch.mmu.slbmte(vcpu, in kvmppc_core_emulate_op_pr()
393 if (!vcpu->arch.mmu.slbie) in kvmppc_core_emulate_op_pr()
[all …]
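
Each Book3S variant fills vcpu->arch.mmu with its callbacks at init time; the 32-bit flavor leaves the SLB hooks NULL, and the emulator guards every dispatch with a NULL check, as the mfsrin and slbmte cases above show. A compact sketch of that optional-callback pattern, all names illustrative:

#include <stddef.h>

struct ppc_mmu {
        void (*mtsrin)(unsigned srnum, unsigned long value);
        void (*slbmte)(unsigned long rb, unsigned long rs); /* 64-bit only */
};

static void mmu32_mtsrin(unsigned srnum, unsigned long value)
{
        (void)srnum; (void)value;   /* would update the segment register */
}

static void mmu_32_init(struct ppc_mmu *mmu)
{
        mmu->mtsrin = mmu32_mtsrin;
        mmu->slbmte = NULL;         /* no SLB on 32-bit Book3S */
}

static int emulate_slbmte(struct ppc_mmu *mmu, unsigned long rb,
                          unsigned long rs)
{
        if (!mmu->slbmte)           /* guard before dispatch, as above */
                return -1;          /* instruction unhandled on this flavor */
        mmu->slbmte(rb, rs);
        return 0;
}
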
/arch/arm64/include/asm/
kvm_mmu.h
179 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
181 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
293 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) in kvm_get_vttbr() argument
295 struct kvm_vmid *vmid = &mmu->vmid; in kvm_get_vttbr()
299 baddr = mmu->pgd_phys; in kvm_get_vttbr()
309 static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, in __load_stage2() argument
313 write_sysreg(kvm_get_vttbr(mmu), vttbr_el2); in __load_stage2()
323 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu) in kvm_s2_mmu_to_kvm() argument
325 return container_of(mmu->arch, struct kvm, arch); in kvm_s2_mmu_to_kvm()
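
kvm_get_vttbr() composes the stage-2 translation table base register from three pieces: the PGD physical address, the VMID shifted into its field, and a common-not-private bit. The same composition in miniature; the field positions below are stated only for illustration:

#include <stdint.h>

#define VTTBR_VMID_SHIFT 48     /* illustrative: VMID in bits 63:48 */

static inline uint64_t make_vttbr(uint64_t pgd_phys, uint16_t vmid, int cnp)
{
        uint64_t baddr = pgd_phys;  /* stage-2 table base, as above */

        return baddr | ((uint64_t)vmid << VTTBR_VMID_SHIFT) |
               (cnp ? 1u : 0u);     /* CnP taken as bit 0 here */
}
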
kvm_asm.h
282 extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
283 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
285 extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
288 extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
290 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
kvm_pgtable.h
468 struct kvm_s2_mmu *mmu; member
592 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
597 #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops, pte_ops) \ argument
598 __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, pte_ops)
939 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
/arch/arm64/kvm/
mmu.c
113 chunk_size = kvm->arch.mmu.split_page_chunk_size; in need_split_memcache_topup_or_resched()
115 cache = &kvm->arch.mmu.split_page_cache; in need_split_memcache_topup_or_resched()
129 chunk_size = kvm->arch.mmu.split_page_chunk_size; in kvm_mmu_split_huge_pages()
135 cache = &kvm->arch.mmu.split_page_cache; in kvm_mmu_split_huge_pages()
150 pgt = kvm->arch.mmu.pgt; in kvm_mmu_split_huge_pages()
179 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); in kvm_arch_flush_remote_tlbs()
186 kvm_tlb_flush_vmid_range(&kvm->arch.mmu, in kvm_arch_flush_remote_tlbs_range()
383 return kvm_pgtable_stage2_unmap(kvm->arch.mmu.pgt, addr, size); in ___unmap_stage2_range()
388 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, in __unmap_stage2_range() argument
391 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in __unmap_stage2_range()
[all …]
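
kvm_mmu_split_huge_pages() walks the range in split_page_chunk_size chunks, and need_split_memcache_topup_or_resched() decides before each chunk whether to refill the page cache or yield the CPU. A sketch of that loop shape; every helper below is a stand-in, not a kernel function:

struct cache;
extern int  cache_needs_topup(struct cache *c, unsigned long chunk);
extern int  cache_topup(struct cache *c, unsigned long chunk);
extern void maybe_yield(void);
extern int  split_one_chunk(unsigned long addr, unsigned long len);

static int split_range(struct cache *c, unsigned long addr,
                       unsigned long end, unsigned long chunk)
{
        while (addr < end) {
                unsigned long len = (end - addr < chunk) ? end - addr : chunk;

                if (cache_needs_topup(c, chunk)) {
                        int ret = cache_topup(c, chunk); /* refill first */

                        if (ret)
                                return ret;
                        maybe_yield();   /* long walk: let others run */
                }
                if (split_one_chunk(addr, len))
                        return -1;
                addr += len;
        }
        return 0;
}
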
ptdump.c
297 pgtable = kvm->arch.mmu.pgt; in kvm_ptdump_parser_init()
339 struct kvm_s2_mmu *mmu = &kvm->arch.mmu; in kvm_ptdump_guest_show() local
345 ret = kvm_ptdump_show_common(m, mmu->pgt, &st->parser_state); in kvm_ptdump_guest_show()
430 struct kvm_s2_mmu *mmu; in kvm_pgtable_debugfs_open() local
446 mmu = &kvm->arch.mmu; in kvm_pgtable_debugfs_open()
447 pgtable = mmu->pgt; in kvm_pgtable_debugfs_open()
/arch/arm64/kvm/hyp/
pgtable.c
532 pgt->mmu = NULL; in kvm_pgtable_hyp_init()
574 struct kvm_s2_mmu *mmu; member
630 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in kvm_tlb_flush_vmid_range() argument
636 kvm_call_hyp(__kvm_tlb_flush_vmid, mmu); in kvm_tlb_flush_vmid_range()
643 kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages); in kvm_tlb_flush_vmid_range()
773 struct kvm_s2_mmu *mmu) in stage2_try_break_pte() argument
799 kvm_tlb_flush_vmid_range(mmu, addr, size); in stage2_try_break_pte()
801 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, in stage2_try_break_pte()
839 struct kvm_s2_mmu *mmu) in stage2_unmap_clear_pte() argument
846 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, in stage2_unmap_clear_pte()
[all …]
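
kvm_tlb_flush_vmid_range() (lines 630-643) falls back to flushing the whole VMID when ranged invalidation is unavailable and otherwise issues the precise ranged TLBI; stage2_try_break_pte() makes a similar choice between a ranged flush and a single-IPA flush. A sketch of that strategy pick, with an invented feature test and helpers:

struct s2_mmu;
extern int  have_range_tlbi(void);
extern void flush_whole_vmid(struct s2_mmu *mmu);
extern void flush_ipa_range(struct s2_mmu *mmu, unsigned long addr,
                            unsigned long pages);

static void tlb_flush_range(struct s2_mmu *mmu, unsigned long addr,
                            unsigned long pages)
{
        if (!have_range_tlbi()) {
                flush_whole_vmid(mmu);  /* correct, if heavier, fallback */
                return;
        }
        flush_ipa_range(mmu, addr, pages);  /* precise ranged TLBI */
}
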
/arch/m68k/kernel/
setup_mm.c
378 const char *cpu, *mmu, *fpu; in show_cpuinfo() local
427 mmu = "68851"; in show_cpuinfo()
429 mmu = "68030"; in show_cpuinfo()
431 mmu = "68040"; in show_cpuinfo()
433 mmu = "68060"; in show_cpuinfo()
435 mmu = "Sun-3"; in show_cpuinfo()
437 mmu = "Apollo"; in show_cpuinfo()
439 mmu = "ColdFire"; in show_cpuinfo()
441 mmu = "unknown"; in show_cpuinfo()
451 cpu, mmu, fpu, in show_cpuinfo()
setup_no.c
176 char *cpu, *mmu, *fpu; in show_cpuinfo() local
180 mmu = "none"; in show_cpuinfo()
190 cpu, mmu, fpu, in show_cpuinfo()
/arch/sh/mm/
Makefile
18 mmu-y := nommu.o extable_32.o
19 mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
22 obj-y += $(mmu-y)
/arch/sparc/kernel/
wuf.S
266 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
267 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
282 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
283 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
/arch/arm/lib/
Makefile
19 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
31 lib-$(CONFIG_MMU) += $(mmu-y)
/arch/powerpc/boot/dts/
microwatt.dts
42 mmu-radix {
101 ibm,mmu-lpid-bits = <12>;
102 ibm,mmu-pid-bits = <20>;
