// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>

int num_pkey;		/* Max number of pkeys supported */
/*
 * Keys marked in the reservation list cannot be allocated by userspace.
 */
u32 reserved_allocation_mask __ro_after_init;

/* Bits set for the initially allocated keys */
static u32 initial_allocation_mask __ro_after_init;

/*
 * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
 * other threads still find access denied when using the same keys.
 */
static u64 default_amr = ~0x0UL;
static u64 default_iamr = 0x5555555555555555UL;
u64 default_uamor __ro_after_init;
/*
 * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
 * We pick key 2 because key 0 is special and key 1 is reserved as per the ISA.
 */
static int execute_only_key = 2;
static bool pkey_execute_disable_supported;

#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))
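
/*
 * Worked example (illustrative, not part of the original file): AMR, IAMR
 * and UAMOR hold one 2-bit field per key, packed from the most significant
 * end of the 64-bit register. With AMR_BITS_PER_PKEY = 2 and
 * PKEY_REG_BITS = 64:
 *
 *	pkeyshift(0) = 64 - (1 * 2) = 62	-> key 0 occupies bits 63:62
 *	pkeyshift(1) = 64 - (2 * 2) = 60	-> key 1 occupies bits 61:60
 *	pkeyshift(2) = 64 - (3 * 2) = 58	-> key 2 occupies bits 59:58
 *
 * So denying read and write for key 2 means setting AMR[59:58] = 0b11,
 * i.e. amr |= (AMR_RD_BIT | AMR_WR_BIT) << pkeyshift(2).
 */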

static int __init dt_scan_storage_keys(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int *pkeys_total = (int *) data;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
	if (!prop)
		return 0;
	*pkeys_total = be32_to_cpu(prop[0]);
	return 1;
}
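
/*
 * Illustrative device tree snippet (an assumption for exposition, not taken
 * from the original file): a CPU node advertising storage keys might carry
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		ibm,processor-storage-keys = <0x20 0x20>;
 *	};
 *
 * The scan above reads only the first cell (keys available for data
 * accesses); a second cell, where present, describes keys for instruction
 * accesses.
 */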

static int scan_pkey_feature(void)
{
	int ret;
	int pkeys_total = 0;

	/*
	 * Pkeys are not supported with Radix translation.
	 */
	if (early_radix_enabled())
		return 0;

	ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
	if (ret == 0) {
		/*
		 * Assume 32 pkeys on P8/P9 bare metal if the device tree does
		 * not define it. We make this exception because some versions
		 * of skiboot forgot to expose the property on POWER8/9.
		 */
		if (!firmware_has_feature(FW_FEATURE_LPAR)) {
			unsigned long pvr = mfspr(SPRN_PVR);

			if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
			    PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
				pkeys_total = 32;
		}
	}

	/*
	 * Adjust the upper limit, based on the number of bits supported by
	 * arch-neutral code.
	 */
	pkeys_total = min_t(int, pkeys_total,
			    ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
	return pkeys_total;
}
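
/*
 * Worked example (illustrative): with the usual five VM_PKEY_BIT* flags,
 * ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT is 0b11111 = 31, so the clamp above
 * amounts to min(pkeys_total, 32). A device tree advertising more than 32
 * keys would therefore still be limited to 32.
 */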

void __init pkey_early_init_devtree(void)
{
	int pkeys_total, i;

	/*
	 * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
	 * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
	 * Ensure that the bits are distinct.
	 */
	BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
		     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

	/*
	 * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
	 * in the VMA flags. Make sure that is really the case.
	 */
	BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
		     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
		     != (sizeof(u64) * BITS_PER_BYTE));
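
	/*
	 * Why the check above works (illustrative note): for a mask whose set
	 * bits are contiguous and start at bit 0, e.g. 0b11111, the count of
	 * leading zeros (59) plus the population count (5) is exactly 64. Any
	 * gap in the mask, or a non-zero starting bit, leaves unset bits
	 * below the highest set bit and makes the sum fall short of 64.
	 */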

	/*
	 * Only P7 and above support SPRN_AMR updates with MSR[PR] = 1.
	 */
	if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/* Scan the device tree for the pkey feature */
	pkeys_total = scan_pkey_feature();
	if (!pkeys_total)
		goto out;

	/* Allow all keys to be modified by default */
	default_uamor = ~0x0UL;

	cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;

	/*
	 * The device tree cannot be relied upon to indicate execute-disable
	 * support. Instead we use a PVR check.
	 */
	if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
		pkey_execute_disable_supported = false;
	else
		pkey_execute_disable_supported = true;

#ifdef CONFIG_PPC_4K_PAGES
	/*
	 * The OS can manage only 8 pkeys due to its inability to represent
	 * them in the Linux 4K PTE. Mark all other keys reserved.
	 */
	num_pkey = min(8, pkeys_total);
#else
	num_pkey = pkeys_total;
#endif

	if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
		/*
		 * Insufficient number of keys to support the execute-only
		 * key. Mark it unavailable.
		 */
		execute_only_key = -1;
	} else {
		/*
		 * Mark the execute_only_key as not available for
		 * user allocation via pkey_alloc.
		 */
		reserved_allocation_mask |= (0x1 << execute_only_key);

		/*
		 * Deny READ/WRITE for the execute_only_key.
		 * Allow execute in IAMR.
		 */
		default_amr |= (0x3ul << pkeyshift(execute_only_key));
		default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));

		/*
		 * Clear the UAMOR bits for this key.
		 */
		default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
	}
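
	/*
	 * Worked example (illustrative): with execute_only_key == 2,
	 * pkeyshift(2) == 58, so the block above sets AMR[59:58] = 0b11
	 * (read and write denied), clears IAMR bit 58 (execute allowed) and
	 * clears UAMOR[59:58] so userspace cannot change this key's rights.
	 */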

	/*
	 * Allow access only for key 0, and prevent any other modification.
	 */
	default_amr &= ~(0x3ul << pkeyshift(0));
	default_iamr &= ~(0x1ul << pkeyshift(0));
	default_uamor &= ~(0x3ul << pkeyshift(0));
	/*
	 * Key 0 is special in that we want to consider it an allocated,
	 * preallocated key. We don't allow changing the AMR bits of key 0,
	 * but one can still pkey_free(key0).
	 */
	initial_allocation_mask |= (0x1 << 0);

	/*
	 * Key 1 is recommended not to be used; see the Power ISA (3.0)
	 * programming note, page 1015.
	 */
	reserved_allocation_mask |= (0x1 << 1);
	default_uamor &= ~(0x3ul << pkeyshift(1));
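
	/*
	 * Illustrative snapshot (assuming execute_only_key == 2 and
	 * execute-disable supported), before the reserved-key loop below:
	 * default_amr   == 0x3fffffffffffffff (all keys deny RW except key 0),
	 * default_iamr  == 0x1155555555555555 (execute denied except keys 0, 2),
	 * default_uamor == 0x03ffffffffffffff (keys 0, 1 and 2 locked).
	 */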

	/*
	 * Prevent the use of OS-reserved keys. Update UAMOR
	 * for those keys. Also mark the rest of the bits in the
	 * 32-bit mask as reserved.
	 */
	for (i = num_pkey; i < 32 ; i++) {
		reserved_allocation_mask |= (0x1 << i);
		default_uamor &= ~(0x3ul << pkeyshift(i));
	}
	/*
	 * Prevent the allocation of reserved keys too.
	 */
	initial_allocation_mask |= reserved_allocation_mask;
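
	/*
	 * Worked example (illustrative): with CONFIG_PPC_4K_PAGES and
	 * num_pkey == 8, the loop above reserves keys 8..31. Assuming the
	 * execute-only key 2 was also reserved, reserved_allocation_mask ==
	 * 0xffffff06 (keys 1, 2 and 8..31) and initial_allocation_mask ==
	 * 0xffffff07 (the same plus key 0).
	 */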

	pr_info("Enabling pkeys with max key count %d\n", num_pkey);
out:
	/*
	 * Set up UAMOR on the boot CPU.
	 */
	mtspr(SPRN_UAMOR, default_uamor);

	return;
}

void pkey_mm_init(struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;
	mm_pkey_allocation_map(mm) = initial_allocation_mask;
	mm->context.execute_only_pkey = execute_only_key;
}

static inline u64 read_amr(void)
{
	return mfspr(SPRN_AMR);
}

static inline void write_amr(u64 value)
{
	mtspr(SPRN_AMR, value);
}

static inline u64 read_iamr(void)
{
	if (unlikely(!pkey_execute_disable_supported))
		return 0x0UL;

	return mfspr(SPRN_IAMR);
}

static inline void write_iamr(u64 value)
{
	if (unlikely(!pkey_execute_disable_supported))
		return;

	mtspr(SPRN_IAMR, value);
}

static inline void init_amr(int pkey, u8 init_bits)
{
	u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
	u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));

	write_amr(old_amr | new_amr_bits);
}

static inline void init_iamr(int pkey, u8 init_bits)
{
	u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
	u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));

	write_iamr(old_iamr | new_iamr_bits);
}

/*
 * Set the access rights in the AMR, IAMR and UAMOR registers for @pkey
 * to those specified in @init_val.
 */
int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				unsigned long init_val)
{
	u64 new_amr_bits = 0x0ul;
	u64 new_iamr_bits = 0x0ul;
	u64 pkey_bits, uamor_pkey_bits;

	/*
	 * Check whether the key is disabled by UAMOR.
	 */
	pkey_bits = 0x3ul << pkeyshift(pkey);
	uamor_pkey_bits = (default_uamor & pkey_bits);

	/*
	 * Both of the UAMOR bits corresponding to the key must be set.
	 */
	if (uamor_pkey_bits != pkey_bits)
		return -EINVAL;

	if (init_val & PKEY_DISABLE_EXECUTE) {
		if (!pkey_execute_disable_supported)
			return -EINVAL;
		new_iamr_bits |= IAMR_EX_BIT;
	}
	init_iamr(pkey, new_iamr_bits);

	/* Set the bits we need in AMR: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT;
	else if (init_val & PKEY_DISABLE_WRITE)
		new_amr_bits |= AMR_WR_BIT;

	init_amr(pkey, new_amr_bits);
	return 0;
}
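
/*
 * Illustrative mapping (not part of the original file) from the pkey_alloc()
 * init_val flags to the per-key register fields written above:
 *
 *	PKEY_DISABLE_ACCESS	-> AMR field 0b11 (no read, no write)
 *	PKEY_DISABLE_WRITE	-> AMR field 0b10 (read only)
 *	PKEY_DISABLE_EXECUTE	-> IAMR field 0b1  (no execute)
 *
 * A hypothetical userspace caller would reach this path via, e.g.,
 * pkey_alloc(0, PKEY_DISABLE_WRITE), making all pages tagged with the
 * returned key read-only for this thread.
 */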

void thread_pkey_regs_save(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/*
	 * TODO: Skip saving registers if @thread hasn't used any keys yet.
	 */
	thread->amr = read_amr();
	thread->iamr = read_iamr();
}

void thread_pkey_regs_restore(struct thread_struct *new_thread,
			      struct thread_struct *old_thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (old_thread->amr != new_thread->amr)
		write_amr(new_thread->amr);
	if (old_thread->iamr != new_thread->iamr)
		write_iamr(new_thread->iamr);
}

void thread_pkey_regs_init(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	thread->amr = default_amr;
	thread->iamr = default_iamr;

	write_amr(default_amr);
	write_iamr(default_iamr);
}

int execute_only_pkey(struct mm_struct *mm)
{
	return mm->context.execute_only_pkey;
}

static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
	/* Do this check first since the vm_flags should be hot */
	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
		return false;

	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
}

/*
 * This should only be called for *plain* mprotect calls.
 */
int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
				  int pkey)
{
	/*
	 * If the currently associated pkey is execute-only, but the requested
	 * protection is not execute-only, move it back to the default pkey.
	 */
	if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
		return 0;

	/*
	 * The requested protection is execute-only. Hence let's use an
	 * execute-only pkey.
	 */
	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(vma->vm_mm);
		if (pkey > 0)
			return pkey;
	}

	/* Nothing to override. */
	return vma_pkey(vma);
}
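
/*
 * Illustrative flow (not part of the original file): a plain
 * mprotect(addr, len, PROT_EXEC) lands here with the VMA's current pkey; if
 * the execute-only key was set up at boot, that key is returned and the
 * mapping becomes execute-only. A later mprotect(addr, len, PROT_READ) on
 * that VMA returns 0, dropping it back to the default key.
 */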

static bool pkey_access_permitted(int pkey, bool write, bool execute)
{
	int pkey_shift;
	u64 amr;

	pkey_shift = pkeyshift(pkey);
	if (execute)
		return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));

	amr = read_amr();
	if (write)
		return !(amr & (AMR_WR_BIT << pkey_shift));

	return !(amr & (AMR_RD_BIT << pkey_shift));
}
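
/*
 * Worked example (illustrative): if the AMR field for key 5 is 0b10
 * (AMR_WR_BIT set, AMR_RD_BIT clear), then pkey_access_permitted(5, true,
 * false) is false (write denied) while pkey_access_permitted(5, false,
 * false) is true (read allowed).
 */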

bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;

	return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
}

/*
 * We only want to enforce protection keys on the current thread because we
 * effectively have no access to AMR/IAMR for other threads or any way to tell
 * which AMR/IAMR in a threaded process we could use.
 *
 * So do not enforce things if the VMA is not from the current mm, or if we are
 * in a kernel thread.
 */
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;
	/*
	 * Do not enforce our key-permissions on a foreign vma.
	 */
	if (foreign || vma_is_foreign(vma))
		return true;

	return pkey_access_permitted(vma_pkey(vma), write, execute);
}

void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
}