1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  *               Ulrich Weigand (weigand@de.ibm.com)
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  *
9  *  Derived from "include/asm-i386/pgtable.h"
10  */
11 
12 #ifndef _ASM_S390_PGTABLE_H
13 #define _ASM_S390_PGTABLE_H
14 
15 #include <linux/sched.h>
16 #include <linux/mm_types.h>
17 #include <linux/page-flags.h>
18 #include <linux/radix-tree.h>
19 #include <linux/atomic.h>
20 #include <asm/bug.h>
21 #include <asm/page.h>
22 #include <asm/uv.h>
23 
24 extern pgd_t swapper_pg_dir[];
25 extern void paging_init(void);
26 
27 enum {
28 	PG_DIRECT_MAP_4K = 0,
29 	PG_DIRECT_MAP_1M,
30 	PG_DIRECT_MAP_2G,
31 	PG_DIRECT_MAP_MAX
32 };
33 
34 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
35 
36 static inline void update_page_count(int level, long count)
37 {
38 	if (IS_ENABLED(CONFIG_PROC_FS))
39 		atomic_long_add(count, &direct_pages_count[level]);
40 }
41 
42 struct seq_file;
43 void arch_report_meminfo(struct seq_file *m);
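/*
 * The update_page_count() helper above maintains per-size counters for
 * the pieces of the kernel direct mapping (4K, 1M, 2G). With
 * CONFIG_PROC_FS enabled, arch_report_meminfo() presumably reports
 * these counters as the DirectMap4k/DirectMap1M/DirectMap2G lines of
 * /proc/meminfo.
 */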
44 
45 /*
46  * The S390 doesn't have any external MMU info: the kernel page
47  * tables contain all the necessary information.
48  */
49 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
50 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
51 
52 /*
53  * ZERO_PAGE is a global shared page that is always zero; used
54  * for zero-mapped memory areas etc..
55  */
56 
57 extern unsigned long empty_zero_page;
58 extern unsigned long zero_page_mask;
59 
60 #define ZERO_PAGE(vaddr) \
61 	(virt_to_page((void *)(empty_zero_page + \
62 	 (((unsigned long)(vaddr)) & zero_page_mask))))
63 #define __HAVE_COLOR_ZERO_PAGE
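/*
 * With __HAVE_COLOR_ZERO_PAGE there is not one zero page but a small
 * set of them: ZERO_PAGE(vaddr) adds (vaddr & zero_page_mask) to
 * empty_zero_page, i.e. it selects the zero page whose cache color
 * matches the faulting address, avoiding cache aliasing on reads.
 */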
64 
65 /* TODO: s390 cannot support io_remap_pfn_range... */
66 
67 #define FIRST_USER_ADDRESS  0UL
68 
69 #define pte_ERROR(e) \
70 	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
71 #define pmd_ERROR(e) \
72 	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
73 #define pud_ERROR(e) \
74 	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
75 #define p4d_ERROR(e) \
76 	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
77 #define pgd_ERROR(e) \
78 	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
79 
80 /*
81  * The vmalloc and module area will always be on the topmost area of the
82  * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
83  * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
84  * modules will reside. That makes sure that inter module branches always
85  * happen without trampolines and in addition the placement within a 2GB frame
86  * is branch prediction unit friendly.
87  */
88 extern unsigned long VMALLOC_START;
89 extern unsigned long VMALLOC_END;
90 #define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
91 extern struct page *vmemmap;
92 extern unsigned long vmemmap_size;
93 
94 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
95 
96 extern unsigned long MODULES_VADDR;
97 extern unsigned long MODULES_END;
98 #define MODULES_VADDR	MODULES_VADDR
99 #define MODULES_END	MODULES_END
100 #define MODULES_LEN	(1UL << 31)
101 
102 static inline int is_module_addr(void *addr)
103 {
104 	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
105 	if (addr < (void *)MODULES_VADDR)
106 		return 0;
107 	if (addr > (void *)MODULES_END)
108 		return 0;
109 	return 1;
110 }
111 
112 /*
113  * A 64 bit page table entry of S390 has the following format:
114  * |			 PFRA			      |0IPC|  OS  |
115  * 0000000000111111111122222222223333333333444444444455555555556666
116  * 0123456789012345678901234567890123456789012345678901234567890123
117  *
118  * I Page-Invalid Bit:    Page is not available for address-translation
119  * P Page-Protection Bit: Store access not possible for page
120  * C Change-bit override: HW is not required to set change bit
121  *
122  * A 64 bit segment table entry of S390 has the following format:
123  * |        P-table origin                              |      TT
124  * 0000000000111111111122222222223333333333444444444455555555556666
125  * 0123456789012345678901234567890123456789012345678901234567890123
126  *
127  * I Segment-Invalid Bit:    Segment is not available for address-translation
128  * C Common-Segment Bit:     Segment is not private (PoP 3-30)
129  * P Page-Protection Bit: Store access not possible for page
130  * TT Type 00
131  *
132  * A 64 bit region table entry of S390 has the following format:
133  * |        S-table origin                             |   TF  TTTL
134  * 0000000000111111111122222222223333333333444444444455555555556666
135  * 0123456789012345678901234567890123456789012345678901234567890123
136  *
137  * I Segment-Invalid Bit:    Segment is not available for address-translation
138  * TT Type 01
139  * TF
140  * TL Table length
141  *
142  * The 64 bit region table origin of S390 has the following format:
143  * |      region table origin                          |       DTTL
144  * 0000000000111111111122222222223333333333444444444455555555556666
145  * 0123456789012345678901234567890123456789012345678901234567890123
146  *
147  * X Space-Switch event:
148  * G Segment-Invalid Bit:
149  * P Private-Space Bit:
150  * S Storage-Alteration:
151  * R Real space
152  * TL Table-Length:
153  *
154  * A storage key has the following format:
155  * | ACC |F|R|C|0|
156  *  0   3 4 5 6 7
157  * ACC: access key
158  * F  : fetch protection bit
159  * R  : referenced bit
160  * C  : changed bit
161  */
162 
163 /* Hardware bits in the page table entry */
164 #define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
165 #define _PAGE_PROTECT	0x200		/* HW read-only bit  */
166 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
167 #define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
168 
169 /* Software bits in the page table entry */
170 #define _PAGE_PRESENT	0x001		/* SW pte present bit */
171 #define _PAGE_YOUNG	0x004		/* SW pte young bit */
172 #define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
173 #define _PAGE_READ	0x010		/* SW pte read bit */
174 #define _PAGE_WRITE	0x020		/* SW pte write bit */
175 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
176 #define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
177 
178 #ifdef CONFIG_MEM_SOFT_DIRTY
179 #define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
180 #else
181 #define _PAGE_SOFT_DIRTY 0x000
182 #endif
183 
184 /* Set of bits not changed in pte_modify */
185 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
186 				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
187 
188 /*
189  * handle_pte_fault uses pte_present and pte_none to find out the pte type
190  * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
191  * distinguish present from not-present ptes. It is changed only with the page
192  * table lock held.
193  *
194  * The following table gives the different possible bit combinations for
195  * the pte hardware and software bits in the last 12 bits of a pte
196  * (. unassigned bit, x don't care, t swap type):
197  *
198  *				842100000000
199  *				000084210000
200  *				000000008421
201  *				.IR.uswrdy.p
202  * empty			.10.00000000
203  * swap				.11..ttttt.0
204  * prot-none, clean, old	.11.xx0000.1
205  * prot-none, clean, young	.11.xx0001.1
206  * prot-none, dirty, old	.11.xx0010.1
207  * prot-none, dirty, young	.11.xx0011.1
208  * read-only, clean, old	.11.xx0100.1
209  * read-only, clean, young	.01.xx0101.1
210  * read-only, dirty, old	.11.xx0110.1
211  * read-only, dirty, young	.01.xx0111.1
212  * read-write, clean, old	.11.xx1100.1
213  * read-write, clean, young	.01.xx1101.1
214  * read-write, dirty, old	.10.xx1110.1
215  * read-write, dirty, young	.00.xx1111.1
216  * HW-bits: R read-only, I invalid
217  * SW-bits: p present, y young, d dirty, r read, w write, s special,
218  *	    u unused, l large
219  *
220  * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
221  * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
222  * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
223  */
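/*
 * Worked example for the table above, composed from the bit
 * definitions in this file: a present, read-write, young, dirty pte
 * for page frame number 0x12345 is
 *   (0x12345UL << 12) | _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
 *   _PAGE_YOUNG | _PAGE_DIRTY == 0x000000001234503dUL,
 * which matches the "read-write, dirty, young" row (.00.xx1111.1).
 */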
224 
225 /* Bits in the segment/region table address-space-control-element */
226 #define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
227 #define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
228 #define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
229 #define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
230 #define _ASCE_REAL_SPACE	0x20	/* real space control		    */
231 #define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
232 #define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
233 #define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
234 #define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
235 #define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
236 #define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
237 
238 /* Bits in the region table entry */
239 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
240 #define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
241 #define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
242 #define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
243 #define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
244 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
245 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
246 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
247 #define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
248 #define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */
249 
250 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
251 #define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
252 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
253 #define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
254 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
255 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
256 
257 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
258 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
259 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
260 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
261 #define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
262 #define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */
263 
264 #ifdef CONFIG_MEM_SOFT_DIRTY
265 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
266 #else
267 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
268 #endif
269 
270 #define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
271 
272 /* Bits in the segment table entry */
273 #define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
274 #define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
275 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
276 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
277 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
278 #define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
279 #define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
280 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
281 #define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */
282 
283 #define _SEGMENT_ENTRY		(0)
284 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
285 
286 #define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
287 #define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
288 #define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
289 #define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
290 #define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */
291 
292 #ifdef CONFIG_MEM_SOFT_DIRTY
293 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
294 #else
295 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
296 #endif
297 
298 #define _CRST_ENTRIES	2048	/* number of region/segment table entries */
299 #define _PAGE_ENTRIES	256	/* number of page table entries	*/
300 
301 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
302 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
303 
304 #define _REGION1_SHIFT	53
305 #define _REGION2_SHIFT	42
306 #define _REGION3_SHIFT	31
307 #define _SEGMENT_SHIFT	20
308 
309 #define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
310 #define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
311 #define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
312 #define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
313 #define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)
314 
315 #define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
316 #define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
317 #define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
318 #define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)
319 
320 #define _REGION1_MASK	(~(_REGION1_SIZE - 1))
321 #define _REGION2_MASK	(~(_REGION2_SIZE - 1))
322 #define _REGION3_MASK	(~(_REGION3_SIZE - 1))
323 #define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
324 
325 #define PMD_SHIFT	_SEGMENT_SHIFT
326 #define PUD_SHIFT	_REGION3_SHIFT
327 #define P4D_SHIFT	_REGION2_SHIFT
328 #define PGDIR_SHIFT	_REGION1_SHIFT
329 
330 #define PMD_SIZE	_SEGMENT_SIZE
331 #define PUD_SIZE	_REGION3_SIZE
332 #define P4D_SIZE	_REGION2_SIZE
333 #define PGDIR_SIZE	_REGION1_SIZE
334 
335 #define PMD_MASK	_SEGMENT_MASK
336 #define PUD_MASK	_REGION3_MASK
337 #define P4D_MASK	_REGION2_MASK
338 #define PGDIR_MASK	_REGION1_MASK
339 
340 #define PTRS_PER_PTE	_PAGE_ENTRIES
341 #define PTRS_PER_PMD	_CRST_ENTRIES
342 #define PTRS_PER_PUD	_CRST_ENTRIES
343 #define PTRS_PER_P4D	_CRST_ENTRIES
344 #define PTRS_PER_PGD	_CRST_ENTRIES
345 
346 #define MAX_PTRS_PER_P4D	PTRS_PER_P4D
347 
348 /*
349  * Segment table and region3 table entry encoding
350  * (R = read-only, I = invalid, y = young bit):
351  *				dy..R...I...wr
352  * prot-none, clean, old	00..1...1...00
353  * prot-none, clean, young	01..1...1...00
354  * prot-none, dirty, old	10..1...1...00
355  * prot-none, dirty, young	11..1...1...00
356  * read-only, clean, old	00..1...1...01
357  * read-only, clean, young	01..1...0...01
358  * read-only, dirty, old	10..1...1...01
359  * read-only, dirty, young	11..1...0...01
360  * read-write, clean, old	00..1...1...11
361  * read-write, clean, young	01..1...0...11
362  * read-write, dirty, old	10..0...1...11
363  * read-write, dirty, young	11..0...0...11
364  * The segment table origin is used to distinguish empty (origin==0) from
365  * read-write, old segment table entries (origin!=0)
366  * HW-bits: R read-only, I invalid
367  * SW-bits: y young, d dirty, r read, w write
368  */
369 
370 /* Page status table bits for virtualization */
371 #define PGSTE_ACC_BITS	0xf000000000000000UL
372 #define PGSTE_FP_BIT	0x0800000000000000UL
373 #define PGSTE_PCL_BIT	0x0080000000000000UL
374 #define PGSTE_HR_BIT	0x0040000000000000UL
375 #define PGSTE_HC_BIT	0x0020000000000000UL
376 #define PGSTE_GR_BIT	0x0004000000000000UL
377 #define PGSTE_GC_BIT	0x0002000000000000UL
378 #define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
379 #define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
380 #define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */
381 
382 /* Guest Page State used for virtualization */
383 #define _PGSTE_GPS_ZERO			0x0000000080000000UL
384 #define _PGSTE_GPS_NODAT		0x0000000040000000UL
385 #define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
386 #define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
387 #define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
388 #define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
389 #define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
390 
391 /*
392  * A user page table pointer has the space-switch-event bit, the
393  * private-space-control bit and the storage-alteration-event-control
394  * bit set. A kernel page table pointer doesn't need them.
395  */
396 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
397 				 _ASCE_ALT_EVENT)
398 
399 /*
400  * Page protection definitions.
401  */
402 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
403 #define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
404 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
405 #define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
406 				 _PAGE_INVALID | _PAGE_PROTECT)
407 #define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
408 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
409 #define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
410 				 _PAGE_INVALID | _PAGE_PROTECT)
411 
412 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
413 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
414 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
415 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
416 #define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
417 				 _PAGE_PROTECT | _PAGE_NOEXEC)
418 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
419 				  _PAGE_YOUNG |	_PAGE_DIRTY)
420 
421 /*
422  * On s390 the page table entry has an invalid bit and a read-only bit.
423  * Read permission implies execute permission and write permission
424  * implies read permission.
425  */
426          /*xwr*/
427 #define __P000	PAGE_NONE
428 #define __P001	PAGE_RO
429 #define __P010	PAGE_RO
430 #define __P011	PAGE_RO
431 #define __P100	PAGE_RX
432 #define __P101	PAGE_RX
433 #define __P110	PAGE_RX
434 #define __P111	PAGE_RX
435 
436 #define __S000	PAGE_NONE
437 #define __S001	PAGE_RO
438 #define __S010	PAGE_RW
439 #define __S011	PAGE_RW
440 #define __S100	PAGE_RX
441 #define __S101	PAGE_RX
442 #define __S110	PAGE_RWX
443 #define __S111	PAGE_RWX
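/*
 * Note that the private mappings __P010/__P011 stay read-only while
 * the shared mappings __S010/__S011 are read-write: a write to a
 * private mapping must fault first so that copy-on-write can run.
 */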
444 
445 /*
446  * Segment entry (large page) protection definitions.
447  */
448 #define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
449 				 _SEGMENT_ENTRY_PROTECT)
450 #define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
451 				 _SEGMENT_ENTRY_READ | \
452 				 _SEGMENT_ENTRY_NOEXEC)
453 #define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
454 				 _SEGMENT_ENTRY_READ)
455 #define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
456 				 _SEGMENT_ENTRY_WRITE | \
457 				 _SEGMENT_ENTRY_NOEXEC)
458 #define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
459 				 _SEGMENT_ENTRY_WRITE)
460 #define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
461 				 _SEGMENT_ENTRY_LARGE |	\
462 				 _SEGMENT_ENTRY_READ |	\
463 				 _SEGMENT_ENTRY_WRITE | \
464 				 _SEGMENT_ENTRY_YOUNG | \
465 				 _SEGMENT_ENTRY_DIRTY | \
466 				 _SEGMENT_ENTRY_NOEXEC)
467 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
468 				 _SEGMENT_ENTRY_LARGE |	\
469 				 _SEGMENT_ENTRY_READ |	\
470 				 _SEGMENT_ENTRY_YOUNG |	\
471 				 _SEGMENT_ENTRY_PROTECT | \
472 				 _SEGMENT_ENTRY_NOEXEC)
473 #define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
474 				 _SEGMENT_ENTRY_LARGE |	\
475 				 _SEGMENT_ENTRY_READ |	\
476 				 _SEGMENT_ENTRY_WRITE | \
477 				 _SEGMENT_ENTRY_YOUNG |	\
478 				 _SEGMENT_ENTRY_DIRTY)
479 
480 /*
481  * Region3 entry (large page) protection definitions.
482  */
483 
484 #define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
485 				 _REGION3_ENTRY_LARGE |	 \
486 				 _REGION3_ENTRY_READ |	 \
487 				 _REGION3_ENTRY_WRITE |	 \
488 				 _REGION3_ENTRY_YOUNG |	 \
489 				 _REGION3_ENTRY_DIRTY | \
490 				 _REGION_ENTRY_NOEXEC)
491 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
492 				   _REGION3_ENTRY_LARGE |  \
493 				   _REGION3_ENTRY_READ |   \
494 				   _REGION3_ENTRY_YOUNG |  \
495 				   _REGION_ENTRY_PROTECT | \
496 				   _REGION_ENTRY_NOEXEC)
497 
498 static inline bool mm_p4d_folded(struct mm_struct *mm)
499 {
500 	return mm->context.asce_limit <= _REGION1_SIZE;
501 }
502 #define mm_p4d_folded(mm) mm_p4d_folded(mm)
503 
504 static inline bool mm_pud_folded(struct mm_struct *mm)
505 {
506 	return mm->context.asce_limit <= _REGION2_SIZE;
507 }
508 #define mm_pud_folded(mm) mm_pud_folded(mm)
509 
510 static inline bool mm_pmd_folded(struct mm_struct *mm)
511 {
512 	return mm->context.asce_limit <= _REGION3_SIZE;
513 }
514 #define mm_pmd_folded(mm) mm_pmd_folded(mm)
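/*
 * The three helpers above implement dynamic folding: how many table
 * levels an mm really has follows from context.asce_limit. As a rough
 * sketch (assuming the usual setup where a task starts with three
 * levels, i.e. asce_limit == _REGION2_SIZE, and the tables are
 * upgraded on demand), mm_p4d_folded() and mm_pud_folded() are
 * initially true, so generic code skips those levels when walking
 * such a page table.
 */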
515 
516 static inline int mm_has_pgste(struct mm_struct *mm)
517 {
518 #ifdef CONFIG_PGSTE
519 	if (unlikely(mm->context.has_pgste))
520 		return 1;
521 #endif
522 	return 0;
523 }
524 
525 static inline int mm_is_protected(struct mm_struct *mm)
526 {
527 #ifdef CONFIG_PGSTE
528 	if (unlikely(atomic_read(&mm->context.is_protected)))
529 		return 1;
530 #endif
531 	return 0;
532 }
533 
534 static inline int mm_alloc_pgste(struct mm_struct *mm)
535 {
536 #ifdef CONFIG_PGSTE
537 	if (unlikely(mm->context.alloc_pgste))
538 		return 1;
539 #endif
540 	return 0;
541 }
542 
543 /*
544  * In the case that a guest uses storage keys
545  * faults should no longer be backed by zero pages
546  */
547 #define mm_forbids_zeropage mm_has_pgste
548 static inline int mm_uses_skeys(struct mm_struct *mm)
549 {
550 #ifdef CONFIG_PGSTE
551 	if (mm->context.uses_skeys)
552 		return 1;
553 #endif
554 	return 0;
555 }
556 
557 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
558 {
559 	register unsigned long reg2 asm("2") = old;
560 	register unsigned long reg3 asm("3") = new;
561 	unsigned long address = (unsigned long)ptr | 1;
562 
563 	asm volatile(
564 		"	csp	%0,%3"
565 		: "+d" (reg2), "+m" (*ptr)
566 		: "d" (reg3), "d" (address)
567 		: "cc");
568 }
569 
570 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
571 {
572 	register unsigned long reg2 asm("2") = old;
573 	register unsigned long reg3 asm("3") = new;
574 	unsigned long address = (unsigned long)ptr | 1;
575 
576 	asm volatile(
577 		"	.insn	rre,0xb98a0000,%0,%3"
578 		: "+d" (reg2), "+m" (*ptr)
579 		: "d" (reg3), "d" (address)
580 		: "cc");
581 }
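/*
 * csp() and cspg() wrap COMPARE AND SWAP AND PURGE (32- and 64-bit
 * operands): the entry at ptr is replaced by new only if it still
 * contains old, and matching TLB entries are purged on all CPUs as
 * part of the same instruction. cspg is emitted via .insn, presumably
 * so that older assemblers without the mnemonic can still build it.
 */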
582 
583 #define CRDTE_DTT_PAGE		0x00UL
584 #define CRDTE_DTT_SEGMENT	0x10UL
585 #define CRDTE_DTT_REGION3	0x14UL
586 #define CRDTE_DTT_REGION2	0x18UL
587 #define CRDTE_DTT_REGION1	0x1cUL
588 
589 static inline void crdte(unsigned long old, unsigned long new,
590 			 unsigned long table, unsigned long dtt,
591 			 unsigned long address, unsigned long asce)
592 {
593 	register unsigned long reg2 asm("2") = old;
594 	register unsigned long reg3 asm("3") = new;
595 	register unsigned long reg4 asm("4") = table | dtt;
596 	register unsigned long reg5 asm("5") = address;
597 
598 	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
599 		     : "+d" (reg2)
600 		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
601 		     : "memory", "cc");
602 }
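/*
 * crdte() wraps COMPARE AND REPLACE DAT TABLE ENTRY: like cspg() it
 * atomically replaces a table entry, but the CRDTE_DTT_* value above
 * tells the CPU which table level the entry belongs to, and the
 * address/asce pair lets the TLB purge be limited to the affected
 * translations.
 */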
603 
604 /*
605  * pgd/p4d/pud/pmd/pte query functions
606  */
607 static inline int pgd_folded(pgd_t pgd)
608 {
609 	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
610 }
611 
612 static inline int pgd_present(pgd_t pgd)
613 {
614 	if (pgd_folded(pgd))
615 		return 1;
616 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
617 }
618 
619 static inline int pgd_none(pgd_t pgd)
620 {
621 	if (pgd_folded(pgd))
622 		return 0;
623 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
624 }
625 
626 static inline int pgd_bad(pgd_t pgd)
627 {
628 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
629 		return 0;
630 	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
631 }
632 
633 static inline unsigned long pgd_pfn(pgd_t pgd)
634 {
635 	unsigned long origin_mask;
636 
637 	origin_mask = _REGION_ENTRY_ORIGIN;
638 	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
639 }
640 
641 static inline int p4d_folded(p4d_t p4d)
642 {
643 	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
644 }
645 
646 static inline int p4d_present(p4d_t p4d)
647 {
648 	if (p4d_folded(p4d))
649 		return 1;
650 	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
651 }
652 
653 static inline int p4d_none(p4d_t p4d)
654 {
655 	if (p4d_folded(p4d))
656 		return 0;
657 	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
658 }
659 
660 static inline unsigned long p4d_pfn(p4d_t p4d)
661 {
662 	unsigned long origin_mask;
663 
664 	origin_mask = _REGION_ENTRY_ORIGIN;
665 	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
666 }
667 
668 static inline int pud_folded(pud_t pud)
669 {
670 	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
671 }
672 
673 static inline int pud_present(pud_t pud)
674 {
675 	if (pud_folded(pud))
676 		return 1;
677 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
678 }
679 
680 static inline int pud_none(pud_t pud)
681 {
682 	if (pud_folded(pud))
683 		return 0;
684 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
685 }
686 
687 #define pud_leaf	pud_large
688 static inline int pud_large(pud_t pud)
689 {
690 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
691 		return 0;
692 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
693 }
694 
695 #define pmd_leaf	pmd_large
696 static inline int pmd_large(pmd_t pmd)
697 {
698 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
699 }
700 
701 static inline int pmd_bad(pmd_t pmd)
702 {
703 	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
704 		return 1;
705 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
706 }
707 
708 static inline int pud_bad(pud_t pud)
709 {
710 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
711 
712 	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
713 		return 1;
714 	if (type < _REGION_ENTRY_TYPE_R3)
715 		return 0;
716 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
717 }
718 
719 static inline int p4d_bad(p4d_t p4d)
720 {
721 	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
722 
723 	if (type > _REGION_ENTRY_TYPE_R2)
724 		return 1;
725 	if (type < _REGION_ENTRY_TYPE_R2)
726 		return 0;
727 	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
728 }
729 
730 static inline int pmd_present(pmd_t pmd)
731 {
732 	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
733 }
734 
735 static inline int pmd_none(pmd_t pmd)
736 {
737 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
738 }
739 
740 #define pmd_write pmd_write
741 static inline int pmd_write(pmd_t pmd)
742 {
743 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
744 }
745 
746 #define pud_write pud_write
747 static inline int pud_write(pud_t pud)
748 {
749 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
750 }
751 
752 static inline int pmd_dirty(pmd_t pmd)
753 {
754 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
755 }
756 
757 static inline int pmd_young(pmd_t pmd)
758 {
759 	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
760 }
761 
762 static inline int pte_present(pte_t pte)
763 {
764 	/* Bit pattern: (pte & 0x001) == 0x001 */
765 	return (pte_val(pte) & _PAGE_PRESENT) != 0;
766 }
767 
768 static inline int pte_none(pte_t pte)
769 {
770 	/* Bit pattern: pte == 0x400 */
771 	return pte_val(pte) == _PAGE_INVALID;
772 }
773 
774 static inline int pte_swap(pte_t pte)
775 {
776 	/* Bit pattern: (pte & 0x201) == 0x200 */
777 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
778 		== _PAGE_PROTECT;
779 }
780 
781 static inline int pte_special(pte_t pte)
782 {
783 	return (pte_val(pte) & _PAGE_SPECIAL);
784 }
785 
786 #define __HAVE_ARCH_PTE_SAME
787 static inline int pte_same(pte_t a, pte_t b)
788 {
789 	return pte_val(a) == pte_val(b);
790 }
791 
792 #ifdef CONFIG_NUMA_BALANCING
793 static inline int pte_protnone(pte_t pte)
794 {
795 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
796 }
797 
798 static inline int pmd_protnone(pmd_t pmd)
799 {
800 	/* pmd_large(pmd) implies pmd_present(pmd) */
801 	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
802 }
803 #endif
804 
805 static inline int pte_soft_dirty(pte_t pte)
806 {
807 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
808 }
809 #define pte_swp_soft_dirty pte_soft_dirty
810 
811 static inline pte_t pte_mksoft_dirty(pte_t pte)
812 {
813 	pte_val(pte) |= _PAGE_SOFT_DIRTY;
814 	return pte;
815 }
816 #define pte_swp_mksoft_dirty pte_mksoft_dirty
817 
818 static inline pte_t pte_clear_soft_dirty(pte_t pte)
819 {
820 	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
821 	return pte;
822 }
823 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
824 
825 static inline int pmd_soft_dirty(pmd_t pmd)
826 {
827 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
828 }
829 
830 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
831 {
832 	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
833 	return pmd;
834 }
835 
836 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
837 {
838 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
839 	return pmd;
840 }
841 
842 /*
843  * query functions pte_write/pte_dirty/pte_young only work if
844  * pte_present() is true. Undefined behaviour if not.
845  */
846 static inline int pte_write(pte_t pte)
847 {
848 	return (pte_val(pte) & _PAGE_WRITE) != 0;
849 }
850 
851 static inline int pte_dirty(pte_t pte)
852 {
853 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
854 }
855 
856 static inline int pte_young(pte_t pte)
857 {
858 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
859 }
860 
861 #define __HAVE_ARCH_PTE_UNUSED
862 static inline int pte_unused(pte_t pte)
863 {
864 	return pte_val(pte) & _PAGE_UNUSED;
865 }
866 
867 /*
868  * Extract the pgprot value from the given pte while at the same time making it
869  * usable for kernel address space mappings where fault driven dirty and
870  * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
871  * must not be set.
872  */
873 static inline pgprot_t pte_pgprot(pte_t pte)
874 {
875 	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
876 
877 	if (pte_write(pte))
878 		pte_flags |= pgprot_val(PAGE_KERNEL);
879 	else
880 		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
881 	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
882 
883 	return __pgprot(pte_flags);
884 }
885 
886 /*
887  * pgd/pmd/pte modification functions
888  */
889 
890 static inline void pgd_clear(pgd_t *pgd)
891 {
892 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
893 		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
894 }
895 
896 static inline void p4d_clear(p4d_t *p4d)
897 {
898 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
899 		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
900 }
901 
902 static inline void pud_clear(pud_t *pud)
903 {
904 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
905 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
906 }
907 
908 static inline void pmd_clear(pmd_t *pmdp)
909 {
910 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
911 }
912 
913 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
914 {
915 	pte_val(*ptep) = _PAGE_INVALID;
916 }
917 
918 /*
919  * The following pte modification functions only work if
920  * pte_present() is true. Undefined behaviour if not.
921  */
922 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
923 {
924 	pte_val(pte) &= _PAGE_CHG_MASK;
925 	pte_val(pte) |= pgprot_val(newprot);
926 	/*
927 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
928 	 * has the invalid bit set, clear it again for readable, young pages
929 	 */
930 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
931 		pte_val(pte) &= ~_PAGE_INVALID;
932 	/*
933 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
934 	 * protection bit set, clear it again for writable, dirty pages
935 	 */
936 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
937 		pte_val(pte) &= ~_PAGE_PROTECT;
938 	return pte;
939 }
940 
941 static inline pte_t pte_wrprotect(pte_t pte)
942 {
943 	pte_val(pte) &= ~_PAGE_WRITE;
944 	pte_val(pte) |= _PAGE_PROTECT;
945 	return pte;
946 }
947 
948 static inline pte_t pte_mkwrite(pte_t pte)
949 {
950 	pte_val(pte) |= _PAGE_WRITE;
951 	if (pte_val(pte) & _PAGE_DIRTY)
952 		pte_val(pte) &= ~_PAGE_PROTECT;
953 	return pte;
954 }
955 
956 static inline pte_t pte_mkclean(pte_t pte)
957 {
958 	pte_val(pte) &= ~_PAGE_DIRTY;
959 	pte_val(pte) |= _PAGE_PROTECT;
960 	return pte;
961 }
962 
963 static inline pte_t pte_mkdirty(pte_t pte)
964 {
965 	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
966 	if (pte_val(pte) & _PAGE_WRITE)
967 		pte_val(pte) &= ~_PAGE_PROTECT;
968 	return pte;
969 }
970 
971 static inline pte_t pte_mkold(pte_t pte)
972 {
973 	pte_val(pte) &= ~_PAGE_YOUNG;
974 	pte_val(pte) |= _PAGE_INVALID;
975 	return pte;
976 }
977 
978 static inline pte_t pte_mkyoung(pte_t pte)
979 {
980 	pte_val(pte) |= _PAGE_YOUNG;
981 	if (pte_val(pte) & _PAGE_READ)
982 		pte_val(pte) &= ~_PAGE_INVALID;
983 	return pte;
984 }
985 
986 static inline pte_t pte_mkspecial(pte_t pte)
987 {
988 	pte_val(pte) |= _PAGE_SPECIAL;
989 	return pte;
990 }
991 
992 #ifdef CONFIG_HUGETLB_PAGE
993 static inline pte_t pte_mkhuge(pte_t pte)
994 {
995 	pte_val(pte) |= _PAGE_LARGE;
996 	return pte;
997 }
998 #endif
999 
1000 #define IPTE_GLOBAL	0
1001 #define	IPTE_LOCAL	1
1002 
1003 #define IPTE_NODAT	0x400
1004 #define IPTE_GUEST_ASCE	0x800
1005 
1006 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
1007 					unsigned long opt, unsigned long asce,
1008 					int local)
1009 {
1010 	unsigned long pto = (unsigned long) ptep;
1011 
1012 	if (__builtin_constant_p(opt) && opt == 0) {
1013 		/* Invalidation + TLB flush for the pte */
1014 		asm volatile(
1015 			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
1016 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
1017 			  [m4] "i" (local));
1018 		return;
1019 	}
1020 
1021 	/* Invalidate ptes with options + TLB flush of the ptes */
1022 	opt = opt | (asce & _ASCE_ORIGIN);
1023 	asm volatile(
1024 		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1025 		: [r2] "+a" (address), [r3] "+a" (opt)
1026 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
1027 }
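/*
 * In the INVALIDATE PAGE TABLE ENTRY forms above, r1 carries the page
 * table origin (pto), r2 the virtual address, r3 (second form only)
 * the IPTE_NODAT/IPTE_GUEST_ASCE options merged with the ASCE origin,
 * and the m4 field selects local (IPTE_LOCAL) versus broadcast TLB
 * clearing.
 */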
1028 
1029 static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1030 					      pte_t *ptep, int local)
1031 {
1032 	unsigned long pto = (unsigned long) ptep;
1033 
1034 	/* Invalidate a range of ptes + TLB flush of the ptes */
1035 	do {
1036 		asm volatile(
1037 			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1038 			: [r2] "+a" (address), [r3] "+a" (nr)
1039 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
1040 	} while (nr != 255);
1041 }
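/*
 * The range form of IPTE may invalidate several consecutive ptes per
 * execution: the CPU updates both the address and the remaining count
 * in place, and nr reads as 255 once the 8-bit count has wrapped below
 * zero, i.e. once the last pte of the range has been processed, which
 * is what terminates the loop.
 */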
1042 
1043 /*
1044  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1045  * both clear the TLB for the unmapped pte. The reason is that
1046  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1047  * to modify an active pte. The sequence is
1048  *   1) ptep_get_and_clear
1049  *   2) set_pte_at
1050  *   3) flush_tlb_range
1051  * On s390 the tlb needs to get flushed with the modification of the pte
1052  * if the pte is active. The only way how this can be implemented is to
1053  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1054  * is a nop.
1055  */
1056 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1057 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
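/*
 * Both helpers exchange a pte and take care of the TLB flush described
 * above; the _direct variant flushes immediately, while the _lazy
 * variant may defer the IPTE when the mm is not attached elsewhere (a
 * sketch of the intent; the implementations live in
 * arch/s390/mm/pgtable.c).
 */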
1058 
1059 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1060 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1061 					    unsigned long addr, pte_t *ptep)
1062 {
1063 	pte_t pte = *ptep;
1064 
1065 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1066 	return pte_young(pte);
1067 }
1068 
1069 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1070 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1071 					 unsigned long address, pte_t *ptep)
1072 {
1073 	return ptep_test_and_clear_young(vma, address, ptep);
1074 }
1075 
1076 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1077 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1078 				       unsigned long addr, pte_t *ptep)
1079 {
1080 	pte_t res;
1081 
1082 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1083 	if (mm_is_protected(mm) && pte_present(res))
1084 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1085 	return res;
1086 }
1087 
1088 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1089 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1090 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1091 			     pte_t *, pte_t, pte_t);
1092 
1093 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1094 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1095 				     unsigned long addr, pte_t *ptep)
1096 {
1097 	pte_t res;
1098 
1099 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1100 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
1101 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1102 	return res;
1103 }
1104 
1105 /*
1106  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1107  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1108  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1109  * cannot be accessed while the batched unmap is running. In this case
1110  * full==1 and a simple pte_clear is enough. See tlb.h.
1111  */
1112 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1113 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1114 					    unsigned long addr,
1115 					    pte_t *ptep, int full)
1116 {
1117 	pte_t res;
1118 
1119 	if (full) {
1120 		res = *ptep;
1121 		*ptep = __pte(_PAGE_INVALID);
1122 	} else {
1123 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1124 	}
1125 	if (mm_is_protected(mm) && pte_present(res))
1126 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1127 	return res;
1128 }
1129 
1130 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1131 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1132 				      unsigned long addr, pte_t *ptep)
1133 {
1134 	pte_t pte = *ptep;
1135 
1136 	if (pte_write(pte))
1137 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1138 }
1139 
1140 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1141 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1142 					unsigned long addr, pte_t *ptep,
1143 					pte_t entry, int dirty)
1144 {
1145 	if (pte_same(*ptep, entry))
1146 		return 0;
1147 	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1148 	return 1;
1149 }
1150 
1151 /*
1152  * Additional functions to handle KVM guest page tables
1153  */
1154 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
1155 		     pte_t *ptep, pte_t entry);
1156 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1157 void ptep_notify(struct mm_struct *mm, unsigned long addr,
1158 		 pte_t *ptep, unsigned long bits);
1159 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
1160 		    pte_t *ptep, int prot, unsigned long bit);
1161 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
1162 		     pte_t *ptep, int reset);
1163 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1164 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
1165 		    pte_t *sptep, pte_t *tptep, pte_t pte);
1166 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
1167 
1168 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1169 			    pte_t *ptep);
1170 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1171 			  unsigned char key, bool nq);
1172 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1173 			       unsigned char key, unsigned char *oldkey,
1174 			       bool nq, bool mr, bool mc);
1175 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
1176 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1177 			  unsigned char *key);
1178 
1179 int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
1180 				unsigned long bits, unsigned long value);
1181 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
1182 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1183 			unsigned long *oldpte, unsigned long *oldpgste);
1184 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
1185 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
1186 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
1187 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
1188 
1189 #define pgprot_writecombine	pgprot_writecombine
1190 pgprot_t pgprot_writecombine(pgprot_t prot);
1191 
1192 #define pgprot_writethrough	pgprot_writethrough
1193 pgprot_t pgprot_writethrough(pgprot_t prot);
1194 
1195 /*
1196  * Certain architectures need to do special things when PTEs
1197  * within a page table are directly modified.  Thus, the following
1198  * hook is made available.
1199  */
1200 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1201 			      pte_t *ptep, pte_t entry)
1202 {
1203 	if (pte_present(entry))
1204 		pte_val(entry) &= ~_PAGE_UNUSED;
1205 	if (mm_has_pgste(mm))
1206 		ptep_set_pte_at(mm, addr, ptep, entry);
1207 	else
1208 		*ptep = entry;
1209 }
1210 
1211 /*
1212  * Conversion functions: convert a page and protection to a page entry,
1213  * and a page entry and page directory to the page they refer to.
1214  */
1215 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1216 {
1217 	pte_t __pte;
1218 
1219 	pte_val(__pte) = physpage | pgprot_val(pgprot);
1220 	if (!MACHINE_HAS_NX)
1221 		pte_val(__pte) &= ~_PAGE_NOEXEC;
1222 	return pte_mkyoung(__pte);
1223 }
1224 
1225 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1226 {
1227 	unsigned long physpage = page_to_phys(page);
1228 	pte_t __pte = mk_pte_phys(physpage, pgprot);
1229 
1230 	if (pte_write(__pte) && PageDirty(page))
1231 		__pte = pte_mkdirty(__pte);
1232 	return __pte;
1233 }
1234 
1235 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1236 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1237 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1238 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1239 
1240 #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
1241 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1242 
1243 static inline unsigned long pmd_deref(pmd_t pmd)
1244 {
1245 	unsigned long origin_mask;
1246 
1247 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
1248 	if (pmd_large(pmd))
1249 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1250 	return pmd_val(pmd) & origin_mask;
1251 }
1252 
1253 static inline unsigned long pmd_pfn(pmd_t pmd)
1254 {
1255 	return pmd_deref(pmd) >> PAGE_SHIFT;
1256 }
1257 
1258 static inline unsigned long pud_deref(pud_t pud)
1259 {
1260 	unsigned long origin_mask;
1261 
1262 	origin_mask = _REGION_ENTRY_ORIGIN;
1263 	if (pud_large(pud))
1264 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1265 	return pud_val(pud) & origin_mask;
1266 }
1267 
1268 static inline unsigned long pud_pfn(pud_t pud)
1269 {
1270 	return pud_deref(pud) >> PAGE_SHIFT;
1271 }
1272 
1273 /*
1274  * The pgd_offset function *always* adds the index for the top-level
1275  * region/segment table. This is done to get a sequence like the
1276  * following to work:
1277  *	pgdp = pgd_offset(current->mm, addr);
1278  *	pgd = READ_ONCE(*pgdp);
1279  *	p4dp = p4d_offset(&pgd, addr);
1280  *	...
1281  * The subsequent p4d_offset, pud_offset and pmd_offset functions
1282  * only add an index if they dereferenced the pointer.
1283  */
1284 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1285 {
1286 	unsigned long rste;
1287 	unsigned int shift;
1288 
1289 	/* Get the first entry of the top level table */
1290 	rste = pgd_val(*pgd);
1291 	/* Pick up the shift from the table type of the first entry */
1292 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1293 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1294 }
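/*
 * The shift computation above decodes the table type bits: for
 * _REGION_ENTRY_TYPE_R1 (0x0c) it yields (0x0c >> 2) * 11 + 20 = 53
 * (= _REGION1_SHIFT), for R2 (0x08) 42, for R3 (0x04) 31 and for a
 * segment table (0x00) 20, so the index is always taken at the level
 * of the mm's current top table.
 */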
1295 
1296 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1297 
1298 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1299 {
1300 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1301 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1302 	return (p4d_t *) pgdp;
1303 }
1304 #define p4d_offset_lockless p4d_offset_lockless
1305 
1306 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1307 {
1308 	return p4d_offset_lockless(pgdp, *pgdp, address);
1309 }
1310 
1311 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1312 {
1313 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1314 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
1315 	return (pud_t *) p4dp;
1316 }
1317 #define pud_offset_lockless pud_offset_lockless
1318 
1319 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1320 {
1321 	return pud_offset_lockless(p4dp, *p4dp, address);
1322 }
1323 #define pud_offset pud_offset
1324 
1325 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1326 {
1327 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1328 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
1329 	return (pmd_t *) pudp;
1330 }
1331 #define pmd_offset_lockless pmd_offset_lockless
1332 
1333 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1334 {
1335 	return pmd_offset_lockless(pudp, *pudp, address);
1336 }
1337 #define pmd_offset pmd_offset
1338 
1339 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1340 {
1341 	return (unsigned long) pmd_deref(pmd);
1342 }
1343 
1344 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1345 {
1346 	return end <= current->mm->context.asce_limit;
1347 }
1348 #define gup_fast_permitted gup_fast_permitted
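/*
 * gup_fast_permitted() keeps the lockless fast path within the range
 * covered by this mm's page tables: addresses above asce_limit have no
 * table entries to walk, so get_user_pages_fast() must take the slow
 * path for them.
 */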
1349 
1350 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1351 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1352 #define pte_page(x) pfn_to_page(pte_pfn(x))
1353 
1354 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1355 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1356 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1357 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1358 
1359 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1360 {
1361 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1362 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1363 	return pmd;
1364 }
1365 
1366 static inline pmd_t pmd_mkwrite(pmd_t pmd)
1367 {
1368 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1369 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1370 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1371 	return pmd;
1372 }
1373 
1374 static inline pmd_t pmd_mkclean(pmd_t pmd)
1375 {
1376 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1377 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1378 	return pmd;
1379 }
1380 
1381 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1382 {
1383 	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
1384 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1385 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1386 	return pmd;
1387 }
1388 
1389 static inline pud_t pud_wrprotect(pud_t pud)
1390 {
1391 	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
1392 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1393 	return pud;
1394 }
1395 
1396 static inline pud_t pud_mkwrite(pud_t pud)
1397 {
1398 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
1399 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1400 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1401 	return pud;
1402 }
1403 
1404 static inline pud_t pud_mkclean(pud_t pud)
1405 {
1406 	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
1407 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1408 	return pud;
1409 }
1410 
1411 static inline pud_t pud_mkdirty(pud_t pud)
1412 {
1413 	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
1414 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1415 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1416 	return pud;
1417 }
1418 
1419 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1420 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1421 {
1422 	/*
1423 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1424 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1425 	 */
1426 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1427 		return pgprot_val(SEGMENT_NONE);
1428 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1429 		return pgprot_val(SEGMENT_RO);
1430 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1431 		return pgprot_val(SEGMENT_RX);
1432 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1433 		return pgprot_val(SEGMENT_RW);
1434 	return pgprot_val(SEGMENT_RWX);
1435 }
1436 
1437 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1438 {
1439 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1440 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1441 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1442 	return pmd;
1443 }
1444 
1445 static inline pmd_t pmd_mkold(pmd_t pmd)
1446 {
1447 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1448 	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1449 	return pmd;
1450 }
1451 
1452 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1453 {
1454 	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1455 		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1456 		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
1457 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1458 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1459 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1460 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1461 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1462 	return pmd;
1463 }
1464 
1465 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1466 {
1467 	pmd_t __pmd;
1468 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1469 	return __pmd;
1470 }
1471 
1472 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1473 
1474 static inline void __pmdp_csp(pmd_t *pmdp)
1475 {
1476 	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1477 	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1478 }
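/*
 * __pmdp_csp() sets _SEGMENT_ENTRY_INVALID in a segment table entry
 * with a broadcast TLB purge: the 32-bit csp() is applied to the low
 * word of the 64-bit entry (pmdp + 1 in big-endian order), which is
 * where the invalid bit (0x20) lives; all other bits of that word are
 * written back unchanged.
 */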
1479 
1480 #define IDTE_GLOBAL	0
1481 #define IDTE_LOCAL	1
1482 
1483 #define IDTE_PTOA	0x0800
1484 #define IDTE_NODAT	0x1000
1485 #define IDTE_GUEST_ASCE	0x2000
1486 
1487 static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1488 					unsigned long opt, unsigned long asce,
1489 					int local)
1490 {
1491 	unsigned long sto;
1492 
1493 	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
1494 	if (__builtin_constant_p(opt) && opt == 0) {
1495 		/* flush without guest asce */
1496 		asm volatile(
1497 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1498 			: "+m" (*pmdp)
1499 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1500 			  [m4] "i" (local)
1501 			: "cc" );
1502 	} else {
1503 		/* flush with guest asce */
1504 		asm volatile(
1505 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1506 			: "+m" (*pmdp)
1507 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1508 			  [r3] "a" (asce), [m4] "i" (local)
1509 			: "cc" );
1510 	}
1511 }
1512 
1513 static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1514 					unsigned long opt, unsigned long asce,
1515 					int local)
1516 {
1517 	unsigned long r3o;
1518 
1519 	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
1520 	r3o |= _ASCE_TYPE_REGION3;
1521 	if (__builtin_constant_p(opt) && opt == 0) {
1522 		/* flush without guest asce */
1523 		asm volatile(
1524 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1525 			: "+m" (*pudp)
1526 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1527 			  [m4] "i" (local)
1528 			: "cc");
1529 	} else {
1530 		/* flush with guest asce */
1531 		asm volatile(
1532 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1533 			: "+m" (*pudp)
1534 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1535 			  [r3] "a" (asce), [m4] "i" (local)
1536 			: "cc" );
1537 	}
1538 }
1539 
1540 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1541 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1542 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
1543 
1544 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1545 
1546 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1547 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1548 				pgtable_t pgtable);
1549 
1550 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1551 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1552 
1553 #define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1554 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1555 					unsigned long addr, pmd_t *pmdp,
1556 					pmd_t entry, int dirty)
1557 {
1558 	VM_BUG_ON(addr & ~HPAGE_MASK);
1559 
1560 	entry = pmd_mkyoung(entry);
1561 	if (dirty)
1562 		entry = pmd_mkdirty(entry);
1563 	if (pmd_val(*pmdp) == pmd_val(entry))
1564 		return 0;
1565 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1566 	return 1;
1567 }
1568 
1569 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
pmdp_test_and_clear_young(struct vm_area_struct * vma,unsigned long addr,pmd_t * pmdp)1570 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1571 					    unsigned long addr, pmd_t *pmdp)
1572 {
1573 	pmd_t pmd = *pmdp;
1574 
1575 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1576 	return pmd_young(pmd);
1577 }
1578 
1579 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
pmdp_clear_flush_young(struct vm_area_struct * vma,unsigned long addr,pmd_t * pmdp)1580 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1581 					 unsigned long addr, pmd_t *pmdp)
1582 {
1583 	VM_BUG_ON(addr & ~HPAGE_MASK);
1584 	return pmdp_test_and_clear_young(vma, addr, pmdp);
1585 }
1586 
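/*
 * As with set_pte_at(): mask out the NOEXEC bit on machines that do
 * not have the instruction-execution-protection facility.
 */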
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

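/*
 * Large pmds track dirty/young in software: a fresh huge entry is
 * made young but write-protected, so the first write fault can mark
 * it dirty (compare the pmd_mkclean()/pmd_mkold() logic above).
 */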
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

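/* Transparent hugepages need the EDAT1 facility (1 MB segment pages). */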
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification exception
 * will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200.
 * This leaves bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
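
/*
 * Illustrative sketch only; the helper below is hypothetical and not
 * part of this header. It shows a (type, offset) pair round-tripping
 * through the swap pte encoding described above, with the type in pte
 * bits 57-61 and the offset in bits 0-51.
 */
static inline int __example_swp_round_trip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x12345);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x12345;
}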

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_S390_PGTABLE_H */