/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

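/* On 32-bit kernels MAX_MEM caps usable RAM at 3.5 GB so that enough
 * kernel address space is left for other purposes; see the comment
 * above the mem_limit check in setup_bootmem(). */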
#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

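	/* Hand-parse "mem=" from the command line (e.g. "mem=512M");
	 * memparse() handles the usual K/M/G suffixes. */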
	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

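/* Ranges separated by more than MAX_GAP page frames (1 GB) are thrown
 * out below when CONFIG_DISCONTIGMEM is not enabled. */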
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

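	/* bootmem tracks free memory with one bit per page frame;
	 * bootmem_bootmap_pages() returns how many pages the bitmap
	 * itself needs for a range of the given size. */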
	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* The IOMMU is always used to access "high mem" on boxes that
	 * can hold more memory than a PCI device can directly DMA to.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
			BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start =  virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}

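/*
 * Map the physical range [start_paddr, start_paddr + size) at virtual
 * address start_vaddr with protection pgprot, allocating intermediate
 * page tables from bootmem as needed.  With force == 0, pages belonging
 * to the kernel text are mapped PAGE_KERNEL_EXEC instead, and kernel
 * image pages are promoted to huge-page mappings where possible.
 */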
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_end;

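	/* Physical extents of the kernel image: _text..data_start is the
	 * read-only, executable portion; _end marks the end of the whole
	 * image including data and bss. */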
	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

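				/* Pick the protection for this page: force
				 * uses pgprot as-is; kernel text becomes
				 * executable; kernel image pages are made
				 * huge where the range allows it. */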
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr)) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					pte = pte_mkhuge(pte);
				} else
#endif
				{
					pte = __mk_pte(address, pgprot);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
			    break;
		}
		start_pmd = 0;
	}
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages() is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages() */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages() */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new LED state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	 * pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

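/* SET_MAP_OFFSET() rounds up to the next VM_MAP_OFFSET (32K) boundary
 * strictly above x, leaving the "hole" described above. */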
#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(unsigned int filter)
{
	int total = 0, reserved = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);

	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			total += zone->present_pages;
			reserved += zone->present_pages - zone->managed_pages;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
								zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
		    int j;
		    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			 j++) {
			pfnnid_map[j] = i;
		    }
		}
#endif

		free_area_init_node(i, zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

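	/* Claim the next free id in the bitmap; remember the index so
	 * the next search resumes where this one stopped. */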
	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
		dirty_array[i] = dirty_space_id[i];
		dirty_space_id[i] = 0;
	    }
	    dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
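		/* Each dirty bit is guaranteed to be set in space_id,
		 * so XOR clears exactly those bits, returning the ids
		 * to the free pool. */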
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

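	/* Grab the dirty sids under the lock, flush every CPU's TLB
	 * without it, and only then return the ids to the free pool. */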
	do_recycle = 0;
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
	    get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
	    recycle_inuse++;
	    do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
	    spin_lock(&sid_lock);
	    recycle_sids(recycle_ndirty, recycle_dirty_array);
	    recycle_inuse = 0;
	    spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif