// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
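/*
 * Per-level capability flags: set at boot if the architecture can back
 * an ioremap mapping with a single huge entry at that level.
 */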
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

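/* Kernel command-line "nohugeiomap" opts out of huge ioremap mappings. */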
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

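/*
 * Called once at boot: cache which levels the architecture supports so
 * the mapping paths below can test a flag instead of calling into arch
 * code every time.
 */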
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

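/*
 * Accessors for the capability flags; the !CONFIG_HAVE_ARCH_HUGE_VMAP
 * stubs below always return 0, so the huge-page paths compile out.
 */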
static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

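/*
 * Leaf level: map each page in [addr, end) to the next physical page
 * frame.  The BUG_ON() guards against mapping over an existing PTE.
 */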
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

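/*
 * Try to cover the range with one PMD-sized huge entry.  Returns
 * non-zero on success and 0 to fall back to PTEs: the range must be
 * exactly one PMD, virtually and physically aligned, and any stale
 * page table hanging off the PMD must be freeable.
 */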
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

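/*
 * Walk/populate the PMD level, trying a huge entry for each aligned
 * PMD-sized chunk and falling back to ioremap_pte_range() otherwise.
 * The PUD and P4D walkers below follow the same pattern one level up.
 */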
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

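/* As ioremap_try_huge_pmd(), one level up. */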
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

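/* As ioremap_pmd_range(), one level up. */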
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

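/* As ioremap_try_huge_pmd(), at the P4D level. */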
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

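/* As ioremap_pmd_range(), at the P4D level. */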
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

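/*
 * Top-level walk: map the virtual range [addr, end) to the physical
 * range starting at phys_addr, then flush the cache and, if a tracked
 * page-table level was modified, sync the kernel mappings.  May sleep;
 * not for atomic context.
 */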
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	phys_addr_t phys_start = phys_addr;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) && !err)
		ioremap_phys_range_hook(phys_start, end - start, prot);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
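/*
 * Generic ioremap: reserve a page-aligned VM_IOREMAP area in vmalloc
 * space, map the (page-aligned) physical range into it, and hand back
 * the virtual address adjusted by the sub-page offset of 'addr'.
 */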
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

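/*
 * Undo ioremap_prot(): mask off the sub-page offset and let vunmap()
 * tear down the mapping and release the vm area.
 *
 * A typical driver-side pairing might look like the sketch below
 * (REG_CTRL and the surrounding error handling are illustrative,
 * not part of this file):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	iounmap(regs);
 */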
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */