// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);
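
/*
 * Look up an existing static (boot-time) mapping that already covers
 * [paddr, paddr + size) with the requested memory type, so that
 * ioremap() can reuse it instead of creating a new vmalloc-space
 * mapping.
 */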
47
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
                        size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is sorted in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

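/*
 * Register a boot-time mapping, keeping static_vmlist sorted by virtual
 * address; find_static_vm_vaddr() relies on that ordering to terminate
 * its search early.
 */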
void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

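/*
 * Bring a task's top-level page table entries for the vmalloc area back
 * in sync with init_mm.  vmalloc_seq is bumped whenever the kernel's
 * vmalloc PGD entries change, so the copy is retried until the sequence
 * number is stable across the memcpy.
 */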
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP: if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pmd_t *pmdp = pmd_off_k(addr);

        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pmd_t *pmd = pmd_off_k(addr);

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

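/*
 * Supersections map 16MB with a single TLB entry.  The descriptor must
 * be replicated into all 16 consecutive first-level slots (eight loop
 * iterations, two entries per pmd), and bits [23:20] of the descriptor
 * carry bits [35:32] of the physical address for targets above 4GB.
 */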
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pmd_t *pmd = pmd_off_k(addr);

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

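/*
 * Core of the ioremap family: reuse a covering static mapping when one
 * exists, otherwise grab a vmalloc-space area and map it with
 * (super)sections where alignment allows, falling back to individual
 * pages.  RAM may not be remapped with attributes that differ from the
 * kernel's linear mapping.
 */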
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped with mismatched attributes - this
         * causes problems with ARMv6+.
         */
        if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
                    mtype != MT_MEMORY_RW))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
              cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        phys_addr_t last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

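/*
 * Typical driver usage (an illustrative sketch only; "res" is the
 * device's MMIO resource, and FOO_REG_CTRL/FOO_CTRL_ENABLE are
 * hypothetical register definitions):
 *
 *      void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(FOO_CTRL_ENABLE, regs + FOO_REG_CTRL);
 *      ...
 *      iounmap(regs);
 */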
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory.  Needed when the kernel wants to execute
 * code in external memory.  This is needed, for example, to reprogram
 * source clocks that would otherwise affect normal memory.  Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY_RWX;
        else
                mtype = MT_MEMORY_RWX_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                                    __builtin_return_address(0));
}

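/*
 * Back-end for memremap(MEMREMAP_WB): the result is a normal cacheable
 * memory mapping rather than a device mapping, hence the __iomem
 * qualifier is deliberately cast away.
 */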
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (__force void *)arch_ioremap_caller(phys_addr, size,
                                                   MT_MEMORY_RW,
                                                   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
        arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
        pci_ioremap_mem_type = mem_type;
}

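/*
 * Map one 64K window of PCI I/O space at the given offset within the
 * fixed PCI_IO_VIRT_BASE virtual region.
 */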
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
        early_ioremap_setup();
}