/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

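/*
 * Kernel virtual address of the 4GB physical-memory boundary; memory
 * below this limit is what DMA-limited (32-bit) devices can reach.
 */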
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

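/*
 * Place the bottom of the register backing store at the lowest address
 * the memory stack may reach (start_stack minus the stack rlimit, capped
 * at MAX_USER_STACK_SIZE).  The RBS grows upward from there toward the
 * downward-growing memory stack.
 */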
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        unsigned long addr, eaddr;

        addr = (unsigned long) ia64_imva(__init_begin);
        eaddr = (unsigned long) ia64_imva(__init_end);
        while (addr < eaddr) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                ++totalram_pages;
                addr += PAGE_SIZE;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
               (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        struct page *page;
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                page = virt_to_page(start);
                ClearPageReserved(page);
                init_page_count(page);
                free_page(start);
                ++totalram_pages;
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);    /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once execute-only page to enable
         * privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");


        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
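/*
 * Walk the kernel page table that backs the virtual mem_map and return
 * the offset (relative to the node's first pfn) of the next pfn whose
 * vmem_map entry is actually mapped; used to skip over memory holes in
 * a sparsely populated node.
 */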
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);

        stop_address = (unsigned long) &vmem_map[
                pgdat->node_start_pfn + pgdat->node_spanned_pages];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

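/*
 * Allocate and populate any missing page-table levels so that the part
 * of vmem_map describing physical range [start, end) is backed by
 * node-local bootmem pages mapped with PAGE_KERNEL.
 */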
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

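/*
 * efi_memmap_walk() callback: initialize the vmem_map entries that
 * describe physical range [start, end), clipped to the zone described
 * by the memmap_init_callback_data argument.
 */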
static int __meminit
virtual_memmap_init (u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

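/*
 * With a virtual mem_map, the memmap covering memory holes is never
 * mapped.  Probe the first and last byte of the struct page with
 * __get_user() (which recovers from the fault) to check that the
 * backing page is present; the second probe matters when the entry
 * straddles a page boundary.
 */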
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

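/*
 * Report a usable memory range to the core VM via add_active_range(),
 * after trimming off any overlap with the crash-kernel reservation.
 */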
int __init
register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                add_active_range(nid, __pa(start) >> PAGE_SHIFT,
                        __pa(end) >> PAGE_SHIFT);
        return 0;
}

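/*
 * efi_memmap_walk() callback: count the PageReserved pages in
 * [start, end) and add them to the running total at *arg.
 */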
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}

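/*
 * Widen the global [min_low_pfn, max_low_pfn] range to cover
 * [start, end), rounding to page (FLATMEM) or granule boundaries.
 */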
int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        long reserved_pages, codesize, datasize, initsize;
        pg_data_t *pgdat;
        int i;
        static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

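        /*
         * The page-table code assumes that each translation level
         * occupies exactly one page.
         */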
        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        if (!mem_map)
                BUG();
        max_mapnr = max_low_pfn;
#endif

        high_memory = __va(max_low_pfn * PAGE_SIZE);

        kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
        kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, _stext, _end - _stext);

        for_each_online_pgdat(pgdat)
                if (pgdat->bdata->node_bootmem_map)
                        totalram_pages += free_all_bootmem_node(pgdat);

        reserved_pages = 0;
        efi_memmap_walk(count_reserved_pages, &reserved_pages);

        codesize = (unsigned long) _etext - (unsigned long) _stext;
        datasize = (unsigned long) _edata - (unsigned long) _etext;
        initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
               "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
               num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
               reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);


        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();

#ifdef CONFIG_IA32_SUPPORT
        ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
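/*
 * Memory hot-add entry point: hand the new range to the core VM,
 * placing it in ZONE_NORMAL of the target node.
 */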
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones + ZONE_NORMAL;
        ret = __add_pages(nid, zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32).  This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
        ia32_exec_domain.name = "Linux/x86";
        ia32_exec_domain.handler = NULL;
        ia32_exec_domain.pers_low = PER_LINUX32;
        ia32_exec_domain.pers_high = PER_LINUX32;
        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
        register_exec_domain(&ia32_exec_domain);

        return 0;
}

__initcall(per_linux32_init);
