/*
 * Flexible mmap layout support
 *
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * as follows:
 *
 * Copyright 2003-2009 Red Hat Inc.
 * All Rights Reserved.
 * Copyright 2005 Andi Kleen, SUSE Labs.
 * Copyright 2007 Jiri Kosina, SUSE Labs.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
26 
27 #include <linux/personality.h>
28 #include <linux/mm.h>
29 #include <linux/random.h>
30 #include <linux/limits.h>
31 #include <linux/sched.h>
32 #include <asm/elf.h>
33 
34 struct va_alignment __read_mostly va_align = {
35 	.flags = -1,
36 };
37 
stack_maxrandom_size(void)38 static unsigned long stack_maxrandom_size(void)
39 {
40 	unsigned long max = 0;
41 	if ((current->flags & PF_RANDOMIZE) &&
42 		!(current->personality & ADDR_NO_RANDOMIZE)) {
43 		max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
44 	}
45 
46 	return max;
47 }
48 
49 /*
50  * Top of mmap area (just below the process stack).
51  *
52  * Leave an at least ~128 MB hole with possible stack randomization.
53  */
54 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
55 #define MAX_GAP (TASK_SIZE/6*5)
56 
mmap_is_legacy(void)57 static int mmap_is_legacy(void)
58 {
59 	if (current->personality & ADDR_COMPAT_LAYOUT)
60 		return 1;
61 
62 	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
63 		return 1;
64 
65 	return sysctl_legacy_va_layout;
66 }
67 
arch_mmap_rnd(void)68 unsigned long arch_mmap_rnd(void)
69 {
70 	unsigned long rnd;
71 
72 	if (mmap_is_ia32())
73 #ifdef CONFIG_COMPAT
74 		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
75 #else
76 		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
77 #endif
78 	else
79 		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
80 
81 	return rnd << PAGE_SHIFT;
82 }
83 
mmap_base(unsigned long rnd)84 static unsigned long mmap_base(unsigned long rnd)
85 {
86 	unsigned long gap = rlimit(RLIMIT_STACK);
87 
88 	if (gap < MIN_GAP)
89 		gap = MIN_GAP;
90 	else if (gap > MAX_GAP)
91 		gap = MAX_GAP;
92 
93 	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
94 }
95 
96 /*
97  * This function, called very early during the creation of a new
98  * process VM image, sets up which VM layout function to use:
99  */
arch_pick_mmap_layout(struct mm_struct * mm)100 void arch_pick_mmap_layout(struct mm_struct *mm)
101 {
102 	unsigned long random_factor = 0UL;
103 
104 	if (current->flags & PF_RANDOMIZE)
105 		random_factor = arch_mmap_rnd();
106 
107 	mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
108 
109 	if (mmap_is_legacy()) {
110 		mm->mmap_base = mm->mmap_legacy_base;
111 		mm->get_unmapped_area = arch_get_unmapped_area;
112 	} else {
113 		mm->mmap_base = mmap_base(random_factor);
114 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
115 	}
116 }
117 
arch_vma_name(struct vm_area_struct * vma)118 const char *arch_vma_name(struct vm_area_struct *vma)
119 {
120 	if (vma->vm_flags & VM_MPX)
121 		return "[mpx]";
122 	return NULL;
123 }
124 
125 /*
126  * Only allow root to set high MMIO mappings to PROT_NONE.
127  * This prevents an unpriv. user to set them to PROT_NONE and invert
128  * them, then pointing to valid memory for L1TF speculation.
129  *
130  * Note: for locked down kernels may want to disable the root override.
131  */
pfn_modify_allowed(unsigned long pfn,pgprot_t prot)132 bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
133 {
134 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
135 		return true;
136 	if (!__pte_needs_invert(pgprot_val(prot)))
137 		return true;
138 	/* If it's real memory always allow */
139 	if (pfn_valid(pfn))
140 		return true;
141 	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
142 		return false;
143 	return true;
144 }
145