/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256 KiB and at most 64 TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
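/*
 * Get a reference to the current CPU's shadow vcpu in the PACA.
 * Preemption is disabled so the pointer stays valid until the
 * matching svcpu_put().
 */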
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

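/* Drop the reference taken by svcpu_get() and re-enable preemption. */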
static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

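/* True if the guest uses the radix MMU rather than the hashed page table. */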
static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

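/*
 * Try once to lock an HPTE: fail if any of @bits are already set in
 * the HPTE's first doubleword or if the store-conditional loses its
 * reservation; otherwise set HPTE_V_HVLOCK atomically.  Returns 1 on
 * success and 0 on failure; callers are expected to retry.
 */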
static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * By byte-swapping all data that we apply to the PTE we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "  stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

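/*
 * Unlock an HPTE: clear HPTE_V_HVLOCK in the given value and store it
 * back with release semantics, so prior updates to the HPTE are
 * visible before the lock is seen as free.
 */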
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* As unlock_hpte(), but without the release barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

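/*
 * A minimal sketch of how a caller might pair the lock helpers above;
 * the surrounding code and the variable names here are hypothetical:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	... examine or update the HPTE ...
 *	unlock_hpte(hptep, v);
 */

/*
 * Compute the RB operand for a tlbie instruction from the V and R
 * doublewords of an HPTE and the index of the PTE within the HPT,
 * reconstructing the virtual-address and page-size encoding that the
 * hardware expects.
 */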
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int i, b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		i = hpte_page_sizes[lp];
		b_psize = i & 0xf;
		a_psize = i >> 4;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering the segment size, so shift
	 * by 16 bits, and also clear the lower HPTE_V_AVPN_SHIFT (7)
	 * bits.  The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we must
	 * collect 11 extra bits); for other sizes we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared; we derive them
	 * from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hash.  In v we
	 * have the va with its lower 23 bits dropped and then shifted
	 * left by HPTE_V_AVPN_SHIFT (7) bits, so to find the vsid we
	 * shift right by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = get_sllp_encoding(a_psize);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of the va; we only have space for bits 58..64, and
		 * missing bits are zero-filled.  The +1 accounts for
		 * the L bit.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

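/*
 * Extract the real page number from the second doubleword of an HPTE,
 * aligned down to the HPTE's page size and expressed in units of
 * PAGE_SIZE.
 */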
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

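/*
 * Check that the cacheability attributes (WIMG bits) of a guest HPTE
 * are compatible with the host mapping: a normal host mapping requires
 * plain M (memory coherence), while a cache-inhibited host mapping
 * requires I set and W clear in the guest HPTE.
 */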
static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the hptel
	 * also has cache-inhibited set.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If the PTE is present, atomically set the referenced bit (and the
 * dirty bit if this is a write access to a writable page) and return
 * the updated PTE; otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the PTE
		 * atomically.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return an empty PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

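/*
 * Return whether the PP (page protection) bits allow read access,
 * given whether storage-key protection (@key, derived from the AMR)
 * applies to this access.
 */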
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

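/*
 * Extract the 5-bit storage key from an HPTE and return the matching
 * 2-bit access-permission field from the AMR.
 */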
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

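/*
 * Bit spinlock on an rmap entry: spin until KVMPPC_RMAP_LOCK_BIT is
 * clear, then take it with acquire semantics.
 */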
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

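/*
 * A memslot can be mapped with pages larger than PAGE_SIZE only if
 * both its base guest frame number and its length are multiples of
 * the larger page size.
 */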
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

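/*
 * True if this HPTE maps part of the VRMA (virtual real mode area),
 * recognized by its fixed 1TB-segment VSID (VRMA_VSID).
 */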
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */