// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#define pr_fmt(fmt) "ioremap: " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/arm-smccc.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/hypervisor.h>

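/*
 * Per-PFN refcount: how many live MMIO mappings cover a page that has been
 * registered with the hypervisor. The page is only unregistered when the
 * last mapping of it is torn down.
 */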
struct ioremap_guard_ref {
	refcount_t count;
};

static DEFINE_STATIC_KEY_FALSE(ioremap_guard_key);
static DEFINE_XARRAY(ioremap_guard_array);
static DEFINE_MUTEX(ioremap_guard_lock);

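/*
 * MMIO guard registration is opt-in: it is only attempted when
 * "ioremap_guard" is passed on the kernel command line.
 */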
static bool ioremap_guard;
static int __init ioremap_guard_setup(char *str)
{
	ioremap_guard = true;

	return 0;
}
early_param("ioremap_guard", ioremap_guard_setup);

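/*
 * The earlycon fixmap entry is created before the MMIO guard is enrolled,
 * so its backing page was never registered with the hypervisor. Register
 * it here, reusing the attributes already installed in the fixmap PTE.
 */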
static void fixup_fixmap(void)
{
	pte_t *ptep = __get_fixmap_pte(FIX_EARLYCON_MEM_BASE);

	if (!ptep)
		return;

	ioremap_phys_range_hook(__pte_to_phys(*ptep), PAGE_SIZE,
				__pgprot(pte_val(*ptep) & PTE_ATTRINDX_MASK));
}

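/*
 * Probe the KVM MMIO guard hypercalls: all four services must be available
 * and the hypervisor granule must match PAGE_SIZE. If enrollment succeeds,
 * enable the static key so that ioremap()/iounmap() start registering and
 * unregistering MMIO pages with the hypervisor.
 */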
void kvm_init_ioremap_services(void)
{
	struct arm_smccc_res res;

	if (!ioremap_guard)
		return;

	/* We need all the functions to be implemented */
	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP))
		return;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID,
			     0, 0, 0, &res);
	if (res.a0 != PAGE_SIZE)
		return;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID,
			     &res);
	if (res.a0 == SMCCC_RET_SUCCESS) {
		static_branch_enable(&ioremap_guard_key);
		fixup_fixmap();
		pr_info("Using KVM MMIO guard for ioremap\n");
	} else {
		pr_warn("KVM MMIO guard registration failed (%ld)\n", res.a0);
	}
}

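/*
 * Hook invoked when an MMIO range is mapped. Each page in the range is
 * either already tracked (take an extra reference) or registered with the
 * hypervisor via MMIO_GUARD_MAP and tracked in ioremap_guard_array.
 */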
void ioremap_phys_range_hook(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	if (!static_branch_unlikely(&ioremap_guard_key))
		return;

	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return;

	mutex_lock(&ioremap_guard_lock);

	while (size) {
		u64 pfn = phys_addr >> PAGE_SHIFT;
		struct ioremap_guard_ref *ref;
		struct arm_smccc_res res;

		ref = xa_load(&ioremap_guard_array, pfn);
		if (ref) {
			refcount_inc(&ref->count);
			goto next;
		}

		/*
		 * It is acceptable for the allocation to fail, especially
		 * if trying to ioremap something very early on, like with
		 * earlycon, which happens long before kmem_cache_init.
		 * This page will be permanently accessible, similar to a
		 * saturated refcount.
		 */
		if (slab_is_available())
			ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (ref) {
			refcount_set(&ref->count, 1);
			if (xa_err(xa_store(&ioremap_guard_array, pfn, ref,
					    GFP_KERNEL))) {
				kfree(ref);
				ref = NULL;
			}
		}

		arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID,
				  phys_addr, prot, &res);
		if (res.a0 != SMCCC_RET_SUCCESS) {
			pr_warn_ratelimited("Failed to register %llx\n",
					    phys_addr);
			xa_erase(&ioremap_guard_array, pfn);
			kfree(ref);
			goto out;
		}

next:
		size -= PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
out:
	mutex_unlock(&ioremap_guard_lock);
}

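/*
 * Hook invoked when an MMIO mapping is torn down. Drop one reference per
 * page; once the last reference is gone, forget the page and ask the
 * hypervisor to unmap it via MMIO_GUARD_UNMAP.
 */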
void iounmap_phys_range_hook(phys_addr_t phys_addr, size_t size)
{
	if (!static_branch_unlikely(&ioremap_guard_key))
		return;

	VM_BUG_ON(phys_addr & ~PAGE_MASK || size & ~PAGE_MASK);

	mutex_lock(&ioremap_guard_lock);

	while (size) {
		u64 pfn = phys_addr >> PAGE_SHIFT;
		struct ioremap_guard_ref *ref;
		struct arm_smccc_res res;

		ref = xa_load(&ioremap_guard_array, pfn);
		if (!ref) {
			pr_warn_ratelimited("%llx not tracked, left mapped\n",
					    phys_addr);
			goto next;
		}

		if (!refcount_dec_and_test(&ref->count))
			goto next;

		xa_erase(&ioremap_guard_array, pfn);
		kfree(ref);

		arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID,
				  phys_addr, &res);
		if (res.a0 != SMCCC_RET_SUCCESS) {
			pr_warn_ratelimited("Failed to unregister %llx\n",
					    phys_addr);
			goto out;
		}

next:
		size -= PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
out:
	mutex_unlock(&ioremap_guard_lock);
}

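/*
 * Common ioremap implementation: page-align the request, reject zero-size,
 * wrapping or out-of-range physical addresses as well as RAM, then carve a
 * VM_IOREMAP area out of the vmalloc space and map it with the requested
 * attributes.
 */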
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

void iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);

void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

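/*
 * memremap() may only reuse the existing linear mapping for regions that
 * memblock actually maps (i.e. not MEMBLOCK_NOMAP regions).
 */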
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	/* memblock_is_map_memory() expects a physical address, not a PFN */
	return memblock_is_map_memory(offset);
}