// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
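
/*
 * Note: on the 020/030 an early termination page descriptor in the
 * pointer table maps a whole 256 KiB region with a single entry, hence
 * the PTRTREESIZE granularity used for all 020/030 mappings below.
 */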

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

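/*
 * On 040/060 the generic vmalloc area allocator can be used directly,
 * so these are just thin wrappers around get_vm_area() and vfree().
 */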
static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

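/* address-ordered list of the I/O areas handed out, in KMAP space */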
static struct vm_struct *iolist;

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
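	/* pad the area so an IO_SIZE gap separates neighbouring mappings;
	 * free_io_area() strips the gap off again */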
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__iounmap(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

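	/*
	 * The Amiga Zorro III window below is assumed to be accessible 1:1
	 * already, so serialized non-cached requests can use the physical
	 * address directly.
	 */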
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned to IO_SIZE: split off the offset
	 * into the first chunk and round the size up to whole chunks.
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

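	/*
	 * Install the mapping: one early termination descriptor per 256 KiB
	 * chunk on 020/030, one pte per page on 040/060.
	 */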
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
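			/*
			 * A pmd_t bundles 16 pointer-table entries on m68k,
			 * so pick the slot for this 256 KiB chunk within
			 * the group.
			 */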
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
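
/*
 * Usage sketch (illustrative only; the base address, size and register
 * offset are made up): drivers normally go through the generic ioremap()
 * wrapper, which on m68k is expected to end up here with
 * IOMAP_NOCACHE_SER:
 *
 *	void __iomem *regs = ioremap(0xdeadb000, 0x1000);
 *	if (regs) {
 *		writeb(0x01, regs + 0x10);	// hypothetical control reg
 *		iounmap(regs);
 *	}
 */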

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap() unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables any more; that wasn't
 * used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0) {
				/* nothing mapped here; step past the chunk
				 * (a bare continue would spin forever) */
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
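
/*
 * Usage sketch (illustrative; fb_base and fb_size are hypothetical): a
 * frame-buffer driver could switch an existing mapping to writethrough
 * caching with:
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 */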