/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

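/*
 * Build an address in region 6, the kernel's uncached identity
 * mapping.  No page tables are involved, so there is nothing to
 * tear down when such a mapping is no longer needed.
 */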
static inline void __iomem *
__ioremap (unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel. For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory. If
	 * WB is supported for the whole granule, we prefer that.
	 */
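	/*
	 * For example (assuming the default 16 MB granule): a request
	 * for phys_addr 0x4012000 with size 0x2000 yields gran_base
	 * 0x4000000 and gran_size 0x1000000, so the check below covers
	 * the entire granule containing the requested range.
	 */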
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping. If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Reserve a chunk of vmalloc address space and map
		 * the physical range into it with kernel page tables.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

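		/* Re-apply the sub-page offset stripped off above. */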
		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);
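
/*
 * Typical driver usage, as an illustrative sketch (the BAR index
 * and register offset below are hypothetical):
 *
 *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0),
 *				     pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(val, regs + MY_REG_OFFSET);
 *	iounmap(regs);
 */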
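
/*
 * Memory that the kernel may already map cacheable must not also be
 * mapped uncached: mixing WB and UC attributes for the same physical
 * page is the attribute aliasing problem described in
 * Documentation/ia64/aliasing.txt, so refuse such requests.
 */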
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
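
/*
 * Only page-table-based mappings need to be torn down.  Those live
 * in region 5 (RGN_GATE, which also contains the vmalloc area); the
 * identity mappings handed out for regions 6 and 7 are not tracked
 * and require no cleanup.
 */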
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);