/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
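
/*
 * Illustrative sketch (not part of the API above): when a 1M direct
 * mapping is split into 256 4K mappings, the counters would be
 * adjusted along the lines of
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */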

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
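
/*
 * Usage sketch (illustrative; "fault_address" is a hypothetical local):
 * zero_page_mask selects among the cache-colored copies of the shared
 * zero page, so a read fault on anonymous memory can be backed by
 *
 *	struct page *zp = ZERO_PAGE(fault_address);
 *
 * and virtual addresses with the same cache color share the same copy.
 */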

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
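
/*
 * Worked example, derived from the table above: a read-write, dirty,
 * young pte has the pattern .00.xx1111.1, i.e. _PAGE_READ, _PAGE_WRITE,
 * _PAGE_DIRTY, _PAGE_YOUNG and _PAGE_PRESENT are set while the HW bits
 * _PAGE_INVALID and _PAGE_PROTECT are clear, so the hardware may both
 * fetch from and store to the page without faulting.
 */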

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
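
/*
 * Worked example, derived from the table above: a read-write, dirty,
 * young entry has the pattern 11..0...0...11, i.e. the SW bits
 * _SEGMENT_ENTRY_DIRTY, _SEGMENT_ENTRY_YOUNG, _SEGMENT_ENTRY_WRITE and
 * _SEGMENT_ENTRY_READ are set while the HW bits _SEGMENT_ENTRY_PROTECT
 * and _SEGMENT_ENTRY_INVALID are clear.
 */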

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
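
/*
 * Usage sketch (illustrative; old, new, table and vmaddr are
 * hypothetical locals): crdte() is a compare-and-replace, i.e. the
 * table entry is only replaced by "new" if it still contains "old",
 * and the corresponding TLB entries are cleared. For a segment table
 * entry this could look like
 *
 *	crdte(old, new, table, CRDTE_DTT_SEGMENT, vmaddr,
 *	      mm->context.asce);
 */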

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
		return 1;
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
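
/*
 * Illustrative sketch (hypothetical pte value): downgrading a writable,
 * dirty, young pte with
 *
 *	pte = pte_modify(pte, PAGE_RO);
 *
 * keeps the software dirty and young bits (both are in _PAGE_CHG_MASK),
 * drops _PAGE_WRITE, and leaves _PAGE_PROTECT set, so the next store
 * through this pte takes a protection fault.
 */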

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
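
/*
 * Sketch of the common-code sequence described above (simplified; the
 * names mm, addr, ptep and newprot are illustrative):
 *
 *	ptent = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
 *	ptent = pte_modify(ptent, newprot);
 *	set_pte_at(mm, addr, ptep, ptent);
 *	// a later flush_tlb_range() is a nop on s390
 */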
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
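
/*
 * Worked example for the shift calculation above: a region-first table
 * has type bits _REGION_ENTRY_TYPE_R1 (0x0c), so the shift becomes
 * (0x0c >> 2) * 11 + 20 = 53 = _REGION1_SHIFT; a segment table has type
 * bits 0x00 and yields 20 = _SEGMENT_SHIFT. The formula thus recovers
 * the index shift of whatever table type the mm uses as its top level.
 */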

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)

static inline void pte_unmap(pte_t *pte) { }

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}
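
/*
 * Illustrative mapping (follows from the checks above): a PAGE_RW pte
 * protection converts to SEGMENT_RW, so e.g.
 *
 *	pmd_val(pmd) |= massage_pgprot_pmd(PAGE_RW);
 *
 * sets _SEGMENT_ENTRY_READ, _SEGMENT_ENTRY_WRITE and
 * _SEGMENT_ENTRY_NOEXEC in the segment table entry.
 */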

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}
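
/*
 * Round-trip sketch (hypothetical values): __swp_entry(3, 0x1234)
 * builds a non-present pte with _PAGE_INVALID and _PAGE_PROTECT set and
 * the offset and type encoded as in the diagram above; __swp_type() and
 * __swp_offset() applied to that entry recover 3 and 0x1234 again.
 */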

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */