/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
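
/*
 * Illustrative sketch, not part of the original file: PAT code such as
 * kernel_map_sync_memtype() uses this helper to keep the kernel's direct
 * mapping consistent with a freshly ioremap()ed range, roughly along
 * these lines (phys_addr and size are hypothetical, assumed to describe
 * an identity-mapped range):
 *
 *	err = ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *				  _PAGE_CACHE_WC);
 */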

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
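
/*
 * Illustrative usage sketch, not part of the original file. A typical
 * driver maps a device's MMIO region, accesses it only through the mmio
 * helpers, and releases it with iounmap(). BAR_PHYS, BAR_LEN and
 * CTRL_OFFSET are hypothetical names standing in for values obtained
 * from resource discovery:
 *
 *	void __iomem *regs = ioremap_nocache(BAR_PHYS, BAR_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFFSET);		use writel/readl, never
 *	status = readl(regs + CTRL_OFFSET);	plain loads and stores
 *	iounmap(regs);
 */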

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
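
/*
 * Illustrative sketch, not part of the original file: write combining is
 * typically used for framebuffer-style streaming writes, where write
 * ordering to individual pixels does not matter. FB_PHYS and FB_LEN are
 * hypothetical placeholders:
 *
 *	void __iomem *fb = ioremap_wc(FB_PHYS, FB_LEN);
 *	if (fb) {
 *		memset_io(fb, 0, FB_LEN);	clear the framebuffer
 *		iounmap(fb);
 *	}
 */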

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
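
/*
 * Illustrative sketch, not part of the original file: the /dev/mem read
 * path pairs these two helpers around a copy, roughly as below (phys,
 * buf and count are hypothetical):
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, count);
 *	unxlate_dev_mem_ptr(phys, ptr);
 */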

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
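
/*
 * Illustrative sketch, not part of the original file: early boot code
 * maps a firmware table before the normal ioremap machinery is up,
 * copies out what it needs, and unmaps with the same size it mapped.
 * TABLE_PHYS, hdr and table are hypothetical:
 *
 *	hdr = early_memremap(TABLE_PHYS, sizeof(*hdr));
 *	if (hdr) {
 *		memcpy(&table, hdr, sizeof(table));
 *		early_iounmap(hdr, sizeof(*hdr));
 *	}
 */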

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no slot found\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}
624