/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
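/*
 * Note: PAGE_SIZE - 1 is only the default, covering "sane" caches with
 * no aliasing. On CPUs whose data cache ways span more than one page,
 * the CPU cache setup code is expected to widen this mask at boot so
 * that shared mappings land on matching cache colours.
 */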

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
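/*
 * Illustrative only, with hypothetical values: assuming 4KB pages
 * (PAGE_SHIFT = 12) and shm_align_mask = 0x3fff (16KB colour
 * granularity), COLOUR_ALIGN(0x10001000, 3) rounds the base up to
 * 0x10004000 and adds the offset's colour bits,
 * (3 << PAGE_SHIFT) & 0x3fff = 0x3000, returning 0x10007000: an
 * address whose colour bits match those of the file offset.
 */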

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}
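	/*
	 * The check above requires that the fixed address and the file
	 * offset agree in the colour bits: if (addr - (pgoff << PAGE_SHIFT))
	 * has any bits set under shm_align_mask, another shared mapping of
	 * the same page could land on a different cache colour and alias.
	 */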

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

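	/*
	 * PAGE_MASK & shm_align_mask keeps only the colour bits above the
	 * in-page offset; together with align_offset = pgoff << PAGE_SHIFT,
	 * vm_unmapped_area() is asked for an address congruent to the file
	 * offset modulo the colour size whenever colouring is needed.
	 */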
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		/* On success the address is page aligned, so any low bits
		 * mean vm_unmapped_area() handed back a negative errno.
		 */
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

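/*
 * The range check above limits /dev/mem access to system RAM: anything
 * below __MEMORY_START, or extending past the physical address of
 * high_memory (the top of lowmem), is rejected.
 */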
/* Any physical address range is considered valid for mmap() here. */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}