/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
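	/*
	 * Layout used below: the page at early_level4_pgt + 4096 serves as
	 * the PUD table and the page at early_level4_pgt + 8192 as the PMD
	 * table for this mapping.  Two adjacent PGD slots and two adjacent
	 * PUD slots are pointed at the same next-level table, and the PMD
	 * loop at 1: masks its index with PTRS_PER_PMD - 1, so the mapping
	 * still covers the kernel image if it happens to straddle a 1G (PUD)
	 * or 512G (PGD) boundary; the aliases this creates are the "nonsense
	 * entries" mentioned above.
	 */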
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that we might
	 * write invalid pmds when the kernel is relocated; cleanup_highmap()
	 * fixes this up along with the mappings beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */
	movl	$(X86_CR4_PAE | X86_CR4_PSE), %ecx
	movq	%rcx, %cr4

	/* Set up the early boot 4-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Set up a boot-time stack */
	movq	stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel will no longer have access to the userspace
	 * addresses we are currently running from.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
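	/*
	 * early_gdt_descr is defined near the end of this file; its base
	 * field points at the init-section copy of the per-cpu gdt_page
	 * (INIT_PER_CPU_VAR(gdt_page)), so after this lgdt we no longer
	 * depend on whatever GDT the bootloader or trampoline set up.
	 */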
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 *	AMD does not support far jump indirect with 64bit offset.
	 *	AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *		JMP FAR mem16:16 FF /5 Far jump indirect,
	 *			with the target specified by a far pointer in memory.
	 *		JMP FAR mem16:32 FF /5 Far jump indirect,
	 *			with the target specified by a far pointer in memory.
	 *
	 *	Intel64 does support 64bit offset.
	 *	Software Developer's Manual Vol 2 states:
	 *		FF /5 JMP m16:16 Jump far, absolute indirect,
	 *			address given in m16:16
	 *		FF /5 JMP m16:32 Jump far, absolute indirect,
	 *			address given in m16:32.
	 *		REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *			address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point.  It's called from play_dead().  Everything has been
 * set up already except the stack; we just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define KAISER_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define KAISER_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
NEXT_PGD_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_level4_pgt)
	.fill	512,8,0
	.fill	KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE