/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H

#ifndef __ASSEMBLY__
#include <asm/kaslr.h>
#endif

#ifdef CONFIG_KASAN
#define KASAN_STACK_ORDER 1
#else
#define KASAN_STACK_ORDER 0
#endif

#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)

#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER)
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)

/*
 * The index for the tss.ist[] array. The hardware limit is 7 entries.
 */
#define IST_INDEX_DF 0
#define IST_INDEX_NMI 1
#define IST_INDEX_DB 2
#define IST_INDEX_MCE 3
#define IST_INDEX_VC 4

/*
 * Set __PAGE_OFFSET to the most negative possible address +
 * PGDIR_SIZE*17 (pgd slot 273).
 *
 * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
 * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
 * but it's what Xen requires.
 */
#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
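/*
 * Worked example, added for illustration (assuming the standard layout):
 * with 4-level paging a pgd slot maps PGDIR_SIZE = 512 GiB = 0x8000000000,
 * and the most negative canonical address is 0xffff800000000000 (pgd slot
 * 256).  Slot 256 + 1 (LDT remap) + 16 (hypervisor) = slot 273, i.e.
 * 0xffff800000000000 + 17 * 0x8000000000 = 0xffff888000000000, which is
 * __PAGE_OFFSET_BASE_L4 above.  With 5-level paging a pgd slot maps 256 TiB,
 * and the same 17-slot gap above 0xff00000000000000 gives 0xff11000000000000.
 */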
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define __PAGE_OFFSET page_offset_base
#else
#define __PAGE_OFFSET __PAGE_OFFSET_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */

#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86/x86_64/mm.rst for a description of the memory map. */

#define __PHYSICAL_MASK_SHIFT 52

#ifdef CONFIG_X86_5LEVEL
#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47)
#else
#define __VIRTUAL_MASK_SHIFT 47
#endif

/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything executable
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
#define TASK_SIZE_MAX ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW (test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP TASK_SIZE_LOW
#define STACK_TOP_MAX TASK_SIZE_MAX

/*
 * Maximum kernel image size is limited to 1 GiB, due to the fixmap living
 * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
 *
 * On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
 * page tables are fully set up.
 *
 * If KASLR is disabled we can shrink it to 0.5 GiB and increase the size
 * of the modules area to 1.5 GiB.
 */
#ifdef CONFIG_RANDOMIZE_BASE
#define KERNEL_IMAGE_SIZE (1024 * 1024 * 1024)
#else
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#endif

#endif /* _ASM_X86_PAGE_64_DEFS_H */
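
/*
 * Illustrative sketch, not part of the original header: a stand-alone
 * user-space program that re-derives a few of the constants above, assuming
 * 4 KiB base pages and 4-level paging (PGDIR_SIZE = 512 GiB).  It is kept
 * under "#if 0" so it is never built with the kernel; extract it and compile
 * it on its own if you want to check the arithmetic.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned long page_size   = 4096UL;            /* assumed 4 KiB pages */
	const unsigned long pgdir_size  = 1UL << 39;          /* 512 GiB per pgd slot (4-level) */
	const unsigned long top_half_l4 = 0xffff800000000000UL; /* pgd slot 256 */

	/* pgd slot 273 = slot 256 + 1 LDT remap slot + 16 hypervisor slots */
	printf("__PAGE_OFFSET_BASE_L4 = %#lx\n", top_half_l4 + 17 * pgdir_size);

	/* first address outside user space with a 47-bit user address width */
	printf("TASK_SIZE_MAX (4-level) = %#lx\n", (1UL << 47) - page_size);

	/* kernel thread stack: order 2 without KASAN, order 3 with it */
	printf("THREAD_SIZE = %lu bytes (no KASAN), %lu bytes (KASAN)\n",
	       page_size << 2, page_size << 3);
	return 0;
}
#endif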