// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/unicore32/mm/ioremap.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because UniCore only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
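
/*
 * A minimal usage sketch of the pattern described above (illustrative
 * only: MY_DEV_BASE, CTRL_REG and STATUS_REG are hypothetical names,
 * not defined anywhere in this tree):
 *
 *	void __iomem *regs = __uc32_ioremap(MY_DEV_BASE, SZ_4K);
 *
 *	if (regs) {
 *		writel(1, regs + CTRL_REG);
 *		status = readl(regs + STATUS_REG);
 *		__uc32_iounmap(regs);
 *	}
 */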
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include <mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_UNICORE_SECTION_MAPPING	0x80000000
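
/*
 * The value is presumably placed in the topmost bit so that it cannot
 * collide with the generic VM_* flags (VM_IOREMAP, VM_ALLOC, ...),
 * which occupy the low bits of vm_struct->flags.
 */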

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a
 * region, the other CPUs will not see this change until their next
 * context switch.  Meanwhile, if an interrupt comes in on one of those
 * other CPUs and requires the new ioremap'd region, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates an extra 4K guard page, so
 * we must mask the size back to a 4MB boundary, or the loop below will
 * overflow.
 */
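/*
 * Concretely (a sketch, assuming 4K pages and 4MB sections): a 4MB
 * request gets a vm area of 4MB + 4K including the guard page, so an
 * 'end' computed from the raw size would land one page past the last
 * section and the PGDIR_SIZE loop below would run one extra iteration.
 * Masking the size with ~(SZ_4M - 1) pulls 'end' back to the 4MB
 * boundary.
 */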
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		WARN(1, "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		return NULL;
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

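	/*
	 * Use a section mapping only when the physical address, the size
	 * and the virtual address are all section (4MB) aligned; anything
	 * else falls back to ordinary page mappings.
	 */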
	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
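/*
 * For example (hypothetical numbers): __uc32_ioremap(0x80001004, 8) is
 * split by __uc32_ioremap_caller() into pfn 0x80001 and offset 0x004;
 * the mapping is created for the whole page and the caller gets back
 * the mapped base address plus 0x004.
 */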
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);

void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);

void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);

void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.  All of the section mappings must be cleared
	 * before the area can be reclaimed by someone else.
	 */
	vm = find_vm_area(addr);
	if (vm && (vm->flags & VM_IOREMAP) &&
		(vm->flags & VM_UNICORE_SECTION_MAPPING))
		unmap_area_sections((unsigned long)vm->addr, vm->size);

	vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);