/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

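/*
 * Direct TLB flush helper: invalidate the pte and flush the TLB entry
 * right away. The IPTE is executed CPU-locally when only the current
 * CPU has the mm attached and the machine has the local-TLB-clearing
 * facility, globally otherwise. flush_count guards against races with
 * the attach/detach logic.
 */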
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

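/*
 * Lazy TLB flush helper: if only the current CPU has the mm attached,
 * just mark the pte invalid and record that a flush is pending
 * (flush_mm), so the TLB can be flushed in bulk later; otherwise the
 * entry has to be flushed globally right away.
 */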
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

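/*
 * Lock the PGSTE (page table entry status extension) that is stored
 * PTRS_PER_PTE entries behind the pte: spin with compare-and-swap
 * (csg) until the PCL bit could be set, and return the locked value.
 * pgste_set_unlock() stores the new value with the PCL bit cleared.
 */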
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

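/*
 * Fold the referenced and changed bits of the real storage key into
 * the guest view (GR/GC bits) in the pgste and take over the access
 * key and fetch protection bit. Only needed if storage keys are in
 * use and the pte maps a page.
 */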
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

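/*
 * Store the new pte and maintain the software dirty tracking bit
 * (PGSTE_UC_BIT) for ptes that allow write access. Machines without
 * ESOP get the hardware dirty bit forced on for all writable ptes.
 */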
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

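/*
 * ptep_xchg_start/ptep_xchg_commit bracket a pte exchange: start
 * locks the pgste and delivers pending notification bits, commit
 * transfers the storage key state, stores the new pte and unlocks.
 */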
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

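/*
 * Exchange a pte with a synchronous TLB flush and return the old
 * value; ptep_xchg_lazy() below is the variant that may defer the
 * flush until the next bulk TLB flush of the mm.
 */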
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

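/*
 * Start/commit pair for protection updates: _start invalidates the
 * pte and returns the old value, leaving preemption disabled and,
 * for mms with pgstes, the pgste locked; _commit stores the new pte
 * and releases both again.
 */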
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

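/*
 * Same flush scheme as for ptes, one level up: segment table entries
 * are invalidated with IDTE (local or global); machines without the
 * IDTE facility fall back to CSP.
 */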
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
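/*
 * Page tables deposited for a transparent huge page are kept in a
 * FIFO list threaded through the first two words of the page table
 * itself; withdraw restores those two entries to invalid ptes.
 */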
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
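/*
 * set_pte_at() backend for mms with pgstes: drop the logical zero
 * indication, transfer the storage key from the pgste and store the
 * pte with the pgste lock held.
 */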
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

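/*
 * Create a shadow pte for vSIE: map the target pte to the same page
 * frame as the source pte, write-protected if the guest pte is, and
 * flag the source pgste with PGSTE_VSIE_BIT so invalidations get
 * propagated to the shadow. Returns 1 on success, 0 if the pte was
 * already shadowed and -EAGAIN if the source pte is not (yet) usable.
 */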
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

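/*
 * Drop the reference that a swap or migration pte holds and adjust
 * the mm counters accordingly.
 */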
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

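/*
 * Set the guest storage key for a page: update the ACC/FP and guest
 * R/C backup in the pgste and, if the page is mapped, the real
 * storage key as well, salvaging the previous host referenced and
 * changed state into the pgste first. "nq" selects the non-quiescing
 * variant of the key operation.
 */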
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste  */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

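/*
 * Read the guest view of the storage key: the real key if the page
 * is mapped, the pgste backup otherwise, with the guest referenced
 * and changed bits merged in.
 */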
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif