// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

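/*
 * State saved across a temporary VMID switch: the stage-2 MMU to restore
 * on exit, plus the EL1 TCR/SCTLR values that get modified when the
 * ARM64_WORKAROUND_SPECULATIVE_AT workaround is in effect.
 */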
struct tlb_inv_context {
	struct kvm_s2_mmu *mmu;
	u64 tcr;
	u64 sctlr;
};

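/*
 * Switch the stage-2 context over to @mmu, saving whatever is needed to
 * undo the switch in exit_vmid_context(). cxt->mmu is left NULL when we
 * are already running in the requested context and no switch is needed.
 */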
static void enter_vmid_context(struct kvm_s2_mmu *mmu,
			       struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;
	cxt->mmu = NULL;

	/*
	 * If we're already in the desired context, then there's nothing
	 * to do.
	 */
	if (vcpu) {
		/* We're in guest context */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;

		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* We're in host context */
		if (mmu == host_s2_mmu)
			return;

		cxt->mmu = host_s2_mmu;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM erratum 1319367, we need
		 * to avoid a Stage-1 walk with the old VMID while we have
		 * the new VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the host S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill. For guests, we ensure that the S1 MMU is
		 * temporarily enabled in the next context.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();

		if (vcpu) {
			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
			if (!(val & SCTLR_ELx_M)) {
				val |= SCTLR_ELx_M;
				write_sysreg_el1(val, SYS_SCTLR);
				isb();
			}
		} else {
			/* The host S1 MMU is always enabled. */
			cxt->sctlr = SCTLR_ELx_M;
		}
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	if (vcpu)
		__load_host_stage2();
	else
		__load_stage2(mmu, kern_hyp_va(mmu->arch));

	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

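/*
 * Undo the switch performed by enter_vmid_context(): restore the previous
 * stage-2 context and, if the SPECULATIVE_AT workaround was applied, the
 * saved EL1 TCR/SCTLR values.
 */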
static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *mmu = cxt->mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (!mmu)
		return;

	if (vcpu)
		__load_stage2(mmu, kern_hyp_va(mmu->arch));
	else
		__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the old VMID */
		isb();

		if (!(cxt->sctlr & SCTLR_ELx_M)) {
			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
			isb();
		}

		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}

	cxt->mmu = NULL;
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

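	/* Ensure prior page-table updates are visible before invalidating. */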
	dsb(ishst);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
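	/* TLBI by IPA takes the address shifted right by 12 bits. */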
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

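	/*
	 * Invalidate all stage-1 and stage-2 TLB entries for the current
	 * VMID, Inner Shareable.
	 */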
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

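	/*
	 * Local (this CPU only) invalidation: all stage-1 entries for the
	 * current VMID plus the entire I-cache, hence the non-shareable
	 * barrier.
	 */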
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
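	/*
	 * Invalidate all EL1&0 regime TLB entries, for all VMIDs and all
	 * ASIDs, Inner Shareable.
	 */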
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}