/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

#ifdef CONFIG_MMU
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
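/*
 * Worked COLOUR_ALIGN() example, assuming hypothetical values for an
 * aliasing cache with 16KiB ways (shm_align_mask == 0x3fff) and
 * PAGE_SHIFT == 12:
 *
 *	COLOUR_ALIGN(0x40001000, 3)
 *	  = ((0x40001000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	  = 0x40004000 + 0x3000
 *	  = 0x40007000
 *
 * The result is an address at or above addr whose offset within the
 * aliasing boundary matches that of the file offset, i.e.
 * (result & shm_align_mask) == ((pgoff << PAGE_SHIFT) & shm_align_mask),
 * so every mapping of a given page lands on the same cache colour.
 */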

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}
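	/*
	 * E.g. with a hypothetical shm_align_mask of 0x3fff, a
	 * MAP_FIXED|MAP_SHARED request at 0x40001000 is rejected
	 * (0x40001000 & 0x3fff == 0x1000), while one at 0x40004000
	 * sits on an aliasing boundary and is returned as-is.
	 */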

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	/* File-backed and shared mappings must be colour-aligned. */
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/*
	 * Start from the cached hint if every hole seen below it was
	 * too small for this request; otherwise rescan from
	 * TASK_UNMAPPED_BASE so a large-enough low hole can be reused.
	 */
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
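/*
 * Usage sketch (hypothetical userspace code, not part of this file):
 * because of the colour alignment above, two MAP_SHARED mappings of
 * the same file offset receive addresses of the same cache colour and
 * so cannot alias in a virtually-indexed cache:
 *
 *	int fd = open("/tmp/f", O_RDWR);	// hypothetical file
 *	char *a = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	char *b = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	// ((unsigned long)a & shm_align_mask) ==
 *	// ((unsigned long)b & shm_align_mask)
 */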
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

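/*
 * Worked valid_phys_addr_range() example, assuming hypothetical board
 * values of __MEMORY_START == 0x08000000 with 64MiB of lowmem, so
 * __pa(high_memory) == 0x0c000000: a 0x1000-byte read at 0x08001000
 * is allowed, while any access starting below 0x08000000 or reaching
 * past 0x0c000000 is refused.
 */
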
/*
 * Unlike the read()/write() path above, mmap() of /dev/mem is not
 * range-restricted here: any pfn is considered valid.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}