/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

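/*
 * Allocate a fresh page to be used as a kernel page table.  The page
 * is cleared, pushed back to RAM and mapped noncached, so that the
 * '040/'060 MMU table walker, which fetches descriptors directly from
 * memory, always sees up-to-date entries.
 */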
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

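/*
 * Allocate a pointer (pmd-level) table.  Pointer tables are smaller
 * than a page, so several of them are packed into each page: the
 * first call reuses the space left over in the last pointer table
 * page set up by head.S, and a new noncached page is only allocated
 * once that page is full.
 */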
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_table: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

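/*
 * Map one contiguous chunk of physical memory (one "node") into the
 * kernel's virtual address space.  On the '020/'030 this takes
 * advantage of early termination descriptors, mapping 32MB per root
 * table entry (ROOTTREESIZE) or 256KB per pointer table entry
 * (PTRTREESIZE) without building the lower table levels; on the
 * '040/'060 full page tables are set up page by page.
 */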
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
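				/*
				 * The page at virtual address 0 is
				 * deliberately left unmapped so that
				 * NULL pointer dereferences trap; the
				 * remaining 63 entries of this page
				 * table map the following pages.
				 */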
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
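	/*
	 * Scan the remaining chunks for the highest address, and drop
	 * any chunk that lies below the first one: the virtual/physical
	 * offset is fixed by the first chunk, so memory before it
	 * cannot be mapped.
	 */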
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
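	/*
	 * Pick the node shift so that the whole range from min_addr
	 * to max_addr falls into at most 64 (1 << 6) slots of the
	 * virtual-to-node lookup table.
	 */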
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(m68k_init_mapped_size, size) - (availmem - addr));
	map_node(0);
	if (size > m68k_init_mapped_size)
		free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
				  size - m68k_init_mapped_size);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * Allocate the empty_zero_page, the page of zeroes used
	 * e.g. for zero-filled anonymous mappings.
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}