// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

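/*
 * Walk and update the PTEs mapping [addr, end) within one pmd: apply
 * @newprot to each present PTE and downgrade writable migration and
 * device-private swap entries to read-only ones.  Returns the number
 * of entries that were updated so callers can decide whether a TLB
 * flush is needed.
 */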
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;

	/*
	 * Can be called with the mmap_sem held only for reading by
	 * prot_numa, so we must check that the pmd isn't constantly
	 * changing under us from pmd_none to pmd_trans_huge and/or
	 * the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_cache(page) && PageDirty(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}

			if (is_write_device_private_entry(entry)) {
				pte_t newpte;

				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

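/*
 * Walk the pmds covering [addr, end) under one pud.  Huge pmds are
 * either changed in place by change_huge_pmd() or split and then
 * handled pte by pte via change_pte_range().  Returns the total
 * number of updated entries.
 */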
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		/*
		 * Automatic NUMA balancing walks the tables with mmap_sem
		 * held for read. It's possible for a parallel update to
		 * occur between the pmd_trans_huge() and
		 * pmd_none_or_clear_bad() checks, leading to a false
		 * positive and clearing. Hence, it's necessary to
		 * atomically read the PMD value for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

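/*
 * Walk the puds covering [addr, end) under one p4d, delegating each
 * populated entry to change_pmd_range().
 */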
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

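/*
 * Walk the p4ds covering [addr, end) under one pgd, delegating each
 * populated entry to change_pud_range().
 */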
static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

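/*
 * Top of the page-table walk: flush the cache for the range, walk the
 * pgds covering [addr, end) and, if any entries were actually changed,
 * flush the TLB for the range afterwards.
 */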
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

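/*
 * Change the page protection of [start, end) in @vma to @newprot,
 * using the hugetlb-specific helper for hugetlb VMAs.  Returns the
 * number of page table entries that were updated.
 */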
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

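/*
 * mm_walk callbacks used by prot_none_walk() below: every PFN mapped
 * in the range must allow the new protection, otherwise the walk is
 * aborted with -EACCES.
 */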
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

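/*
 * Walk the given range and check that every mapped PFN may be switched
 * to the page protection implied by @newflags.  Used for the PROT_NONE
 * PFN permission check in mprotect_fixup().
 */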
static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, unsigned long newflags)
{
	pgprot_t new_pgprot = vm_get_page_prot(newflags);
	struct mm_walk prot_none_walk = {
		.pte_entry = prot_none_pte_entry,
		.hugetlb_entry = prot_none_hugetlb_entry,
		.test_walk = prot_none_test,
		.mm = current->mm,
		.private = &new_pgprot,
	};

	return walk_page_range(start, end, &prot_none_walk);
}

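/*
 * Apply @newflags to the [start, end) portion of @vma: charge or
 * uncharge memory accounting as needed, merge or split the VMA so the
 * range carries exactly the new flags, then update the page tables via
 * change_protection().  On success *pprev is set to the VMA covering
 * the range.
 */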
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
		error = prot_none_walk(vma, start, end, newflags);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
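/*
 * Common implementation of mprotect() and pkey_mprotect(): validate
 * the arguments, take mmap_sem for writing and apply the new
 * protection to every VMA intersecting [start, start + len) via
 * mprotect_fixup().
 */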
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

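/*
 * Legacy mprotect(2) entry point: no protection key, so pkey is -1.
 */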
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

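/*
 * pkey_mprotect(2): like mprotect(2), but also assigns the given
 * protection key to the range.
 */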
SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

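/*
 * pkey_alloc(2): allocate a free protection key for the calling
 * process and initialise its access rights to @init_val.  Returns the
 * key number or a negative error code.
 */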
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

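/*
 * pkey_free(2): return a previously allocated protection key to the
 * kernel.
 */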
SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */