/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	56
#define H_PAGE_BUSY		_RPAGE_RSV1 /* software: PTE & hash are busy */
#define H_PAGE_F_SECOND		_RPAGE_RSV2	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44)
#define H_PAGE_HASHPTE		_RPAGE_RPN43	/* PTE has associated HPTE */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

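/*
 * Worked example (illustrative numbers, not values from this file):
 * if the four index sizes above sum to 30 and PAGE_SHIFT is 16, then
 * H_PGTABLE_EADDR_SIZE = 46 and H_PGTABLE_RANGE = 1UL << 46 = 64TB
 * of effective address space mapped by the page tables.
 */
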
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with 64K hash pages do we need the second half of the PMD page
 * table to store a pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE  ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies 56T of it; the remainder is reserved for the kernel
 * I/O mappings that start at H_KERN_IO_START.
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START	H_VMALLOC_END

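/*
 * Resulting layout (these values follow directly from the constants
 * above):
 *
 *	H_VMALLOC_START = 0xD000000000000000
 *	H_VMALLOC_END   = 0xD000380000000000	(start + 56T)
 *	H_KERN_IO_START = 0xD000380000000000
 *
 * which leaves 64T - 56T = 8T of the kernel virtual region for the
 * I/O mappings above H_KERN_IO_START.
 */
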
/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
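
/*
 * For example, with PAGE_OFFSET = 0xc000000000000000 on book3s64:
 *
 *	REGION_ID(0xc000000000000000) == 0xc == KERNEL_REGION_ID
 *	REGION_ID(0xd000000000000000) == 0xd == VMALLOC_REGION_ID
 *	REGION_ID(0xf000000000000000) == 0xf == VMEMMAP_REGION_ID
 *	REGION_ID(0x0000000010000000) == 0x0 == USER_REGION_ID
 */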

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */


/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
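
/*
 * For example, a (hypothetical) slot nibble of 0xa decodes as
 * (0xa & _PTEIDX_SECONDARY) != 0, i.e. the HPTE lives in the
 * secondary HPTEG, at group index 0xa & _PTEIDX_GROUP_IX == 2.
 * This is the nibble recorded in the PTE via H_PAGE_F_SECOND and
 * H_PAGE_F_GIX at H_PAGE_F_GIX_SHIFT.
 */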

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
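
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a test-and-clear of the accessed bit can be built on the update
 * primitive above. The ldarx/stdcx. loop retries while H_PAGE_BUSY is
 * set, so the update cannot race with a concurrent hash-table update.
 */
static inline int hash__example_clear_young(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	/* Atomically clear _PAGE_ACCESSED and fetch the old PTE value. */
	unsigned long old = hash__pte_update(mm, addr, ptep,
					     _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}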

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
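
/*
 * Illustrative use (hypothetical call site, assuming __pte()/pte_val()
 * are in scope): a fault handler that wants to set dirty/accessed
 * without flushing the HPTE could do:
 *
 *	pte_t entry = __pte(pte_val(*ptep) | _PAGE_DIRTY | _PAGE_ACCESSED);
 *	hash__ptep_set_access_flags(ptep, entry);
 *
 * Only the bits covered by the mask above are taken from entry.
 */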

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

/*
 * This low level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
			     unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
					      unsigned long page_size,
					      unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
				     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */