/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

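/*
 * Round addr up to the next SHMLBA boundary, then add the cache
 * colour of pgoff, so the resulting address has the same colour
 * as the underlying page offset.
 */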
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

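/*
 * Use the legacy (bottom-up) layout if the task's personality asks
 * for it, if the stack rlimit is unlimited (the stack's eventual
 * extent is unknown, so a safe top-down base cannot be chosen), or
 * if the legacy_va_layout sysctl is set.
 */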
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

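/*
 * Base of the top-down mmap area: TASK_SIZE minus the stack gap
 * (clamped to [MIN_GAP, MAX_GAP]) minus the ASLR offset rnd.
 */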
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases; in the
 * VIVT case, however, the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: honour the requested address,
	 * but reject shared mappings that would break colour alignment
	 * on aliasing caches.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

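	/* requested length too big for entire address space */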
	if (len > TASK_SIZE)
		return -ENOMEM;

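	/* requesting a specific address */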
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

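	/*
	 * Search bottom-up between mmap_base and TASK_SIZE, asking
	 * vm_unmapped_area() for SHMLBA colour alignment when needed.
	 */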
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

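	/*
	 * Search top-down between FIRST_USER_ADDRESS and mmap_base,
	 * with the same colour alignment constraint as above.
	 */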
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

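/*
 * Generate the per-process ASLR offset: up to mmap_rnd_bits random
 * bits, shifted up to page granularity.
 */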
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

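/*
 * Select the mmap layout for a new mm: bottom-up from
 * TASK_UNMAPPED_BASE for the legacy layout, otherwise top-down
 * from mmap_base(), randomised when PF_RANDOMIZE is set.
 */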
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
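/*
 * The range must lie entirely within the direct-mapped (lowmem)
 * physical span: [PHYS_OFFSET, __pa(high_memory - 1) + 1).
 */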
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif