/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>
#include <asm/nops.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn, otherwise we will waste some page table
 * entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 left by 31 instead of shifting 1 left by 32 in order to
 * avoid a gas warning about an overflowing shift count when gas has been
 * compiled with only host target support, using a 32-bit type for its
 * internal representation.
 */
LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
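/*
 * In C terms the clear below is roughly (illustrative only):
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 * The byte count is shifted right by 2 because rep;stosl stores one
 * 32-bit word per iteration.
 */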
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (the kexec-on-panic case).  Hence copy out the
 * parameters before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f				# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */

	xorl %ebx,%ebx			/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx	/* Create PMD entry */
	movl %ecx,(%edx)		/* Store PMD entry */
					/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);

	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx	/* Create PDE entry */
	movl %ecx,(%edx)		/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)	/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
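	/*
	 * Note on page_pde_offset: a PDE covers 4 MB (address bits 31..22)
	 * and is 4 bytes wide, so the kernel-half entry for a physical
	 * address lives (__PAGE_OFFSET >> 22) * 4 = __PAGE_OFFSET >> 20
	 * bytes beyond its identity-map twin.  Each pass through 10: above
	 * fills one 1024-entry page table, i.e. maps another 4 MB.
	 */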
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp default_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point.  It's called from play_dead().  Everything has
 * been set up already except the stack.  We just set up the stack here,
 * then call start_secondary().
 */
ENTRY(start_cpu0)
	movl stack_start, %ecx
	movl %ecx, %esp
	jmp *(initial_code)
ENDPROC(start_cpu0)
#endif

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in the init
 * section, which will be freed later.
 */
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(stack_start),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif


default_entry:
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared.  Some BIOSes leave
 * bits like NT set.  This would confuse the debugger if this code is traced.
 * So initialize them properly now before switching to protected mode.  That
 * means DF in particular (even though we have cleared it earlier after
 * copying the command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!  Specifically, cr4
 * exists if and only if CPUID exists and has flags other than the FPU flag
 * set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
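	/*
	 * Classic CPUID-detection trick: EFLAGS.ID (bit 21) can only be
	 * toggled by software when the CPU implements the CPUID
	 * instruction.  We just tried to set it via popfl; if it reads
	 * back clear, this is a pre-CPUID part (e.g. an early 486), which
	 * also means there is no CR4 to program.
	 */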
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz enable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz enable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz enable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja enable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc enable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

enable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3			/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0			/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f		/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:

/*
 * Check if it is 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je is486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je is486

	movl $1,%eax			# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl			# save reg for future use
	andb $0x0f,%ah			# mask processor family
	movb %ah,X86
	andb $0xf0,%al			# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl			# mask stepping (mask revision)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:
	movl $0x50022,%ecx		# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax		# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	pushl $0			# fake return address for unwinder
	jmp *(initial_code)

#include "verify_cpu.S"

/*
 * setup_once
 *
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
__INIT
setup_once:
	/*
	 * Set up an idt with 256 interrupt gates that push zero if there
	 * is no error code and then jump to early_idt_handler_common.
	 * It doesn't actually load the idt - that needs to be done on
	 * each CPU. Interrupts are enabled elsewhere, when we can be
	 * relatively sure everything is ok.
	 */
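	/*
	 * Each IDT entry is an 8-byte gate descriptor: offset bits 0-15,
	 * selector, a zero byte, the type byte, then offset bits 16-31.
	 * Storing the handler address at both (%edi) and 4(%edi) puts the
	 * two offset halves in the right places; the dword written at
	 * 2(%edi) then fills in the selector (__KERNEL_CS) and the type
	 * 0x8E (present, DPL=0, 32-bit interrupt gate) in between.
	 */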

	movl $idt_table,%edi
	movl $early_idt_handler_array,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx
1:
	movl %eax,(%edi)
	movl %eax,4(%edi)
	/* interrupt gate, dpl=0, present */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
	addl $EARLY_IDT_HANDLER_SIZE,%eax
	addl $8,%edi
	loop 1b

	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
	movl $ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax			/* selector = 0x0010 = cs */
	movw $0x8E00,%dx		/* interrupt gate - dpl=0, present */
2:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	loop 2b

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation.  Manually set base address in stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif

	andl $0,setup_once_ref		/* Once is enough, thanks */
	ret

ENTRY(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushl $0			# Dummy error code, to make stack frame uniform
	.endif
	pushl $i			# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
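
/*
 * Each stub above is padded with 0xcc (int3) out to EARLY_IDT_HANDLER_SIZE
 * bytes, so the stub for vector i sits at the fixed offset
 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE (no lookup table
 * needed), and any stray jump into the padding traps immediately.
 */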
early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%esp)			# X86_TRAP_NMI
	je is_nmi			# Ignore NMI

	cmpl $2,%ss:early_recursion_flag
	je hlt_loop
	incl %ss:early_recursion_flag

	push %eax			# 16(%esp)
	push %ecx			# 12(%esp)
	push %edx			#  8(%esp)
	push %ds			#  4(%esp)
	push %es			#  0(%esp)
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es

	cmpl $(__KERNEL_CS),32(%esp)
	jne 10f

	leal 28(%esp),%eax		# Pointer to %eip
	call early_fixup_exception
	andl %eax,%eax
	jnz ex_entry			/* found an exception entry */

10:
#ifdef CONFIG_PRINTK
	xorl %eax,%eax
	movw %ax,2(%esp)		/* clean up the segment values on some cpus */
	movw %ax,6(%esp)
	movw %ax,34(%esp)
	leal 40(%esp),%eax
	pushl %eax			/* %esp before the exception */
	pushl %ebx
	pushl %ebp
	pushl %esi
	pushl %edi
	movl %cr2,%eax
	pushl %eax
	pushl (20+6*4)(%esp)		/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

ex_entry:
	pop %es
	pop %ds
	pop %edx
	pop %ecx
	pop %eax
	decl %ss:early_recursion_flag
is_nmi:
	addl $8,%esp			/* drop vector number and error code */
	iret
ENDPROC(early_idt_handler_common)

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret
ENDPROC(ignore_int)
__INITDATA
	.align 4
early_recursion_flag:
	.long 0

__REFDATA
	.align 4
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0
ENTRY(swapper_pg_dir)
	.fill 1024,4,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
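	/*
	 * Worked example: with the default VMSPLIT_3G layout,
	 * __PAGE_OFFSET is 0xC0000000, so KPMDS = ((0x40000000 >> 30) & 3)
	 * = 1: only the last of the four PGD entries below points at a
	 * kernel PMD, and the first entry reuses that same PMD for the
	 * low identity map.
	 */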
ENTRY(initial_page_table)
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long 0,0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long 0,0
	.long 0,0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* regs pushed in early_idt_handler_common: */
	.ascii "     EDI %p  ESI %p  EBP %p  EBX %p\n"
	.ascii "     ESP %p   ES %p   DS %p\n"
	.ascii "     EDX %p  ECX %p  EAX %p\n"
/* fault frame: */
	.ascii "     vec %p  err %p  EIP %p   CS %p  flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions.  They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size and a 32-bit linear address value:
 */

	.data
.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
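/*
 * Decoding the two quadwords above: base 0x00000000, limit 0xfffff in
 * 4 KB granularity (i.e. 4 GB), 32-bit; access byte 0x9a is a present,
 * DPL-0, readable code segment, and 0x92 the matching writable data
 * segment.
 */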