/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 *   This program is free software; you can redistribute it and/or modify it
 *   under the terms and conditions of the GNU General Public License,
 *   version 2, as published by the Free Software Foundation.
 *
 *   This program is distributed in the hope it will be useful, but WITHOUT
 *   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *   more details.
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer.  This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.
 *
 * We work around this by creating percpu "ministacks", each of which
 * is mapped 2^16 times 64K apart.  When we detect that the return SS is
 * on the LDT, we copy the IRET frame to the ministack and use the
 * relevant alias to return to userspace.  The ministacks are mapped
 * readonly, so if the IRET faults we promote #GP to #DF, which is an
 * IST vector and thus has its own stack; we then do the fixup in the
 * #DF handler.
 *
 * This file sets up the ministacks and the related page tables.  The
 * actual ministack invocation is in entry_64.S.
 */
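/*
 * To make the above concrete (a sketch; the authoritative code is in
 * entry_64.S): after an IRET to a 16-bit segment, the user inherits
 * bits 16-31 of ESP from whatever stack the kernel IRETs off.  Off the
 * real kernel stack that leaks a kernel address; off the ministack
 * alias whose bits 16-31 equal bits 16-31 of the user's own RSP, the
 * user sees exactly the ESP it expects.  Mapping each ministack 2^16
 * times at 64K intervals is what guarantees such an alias exists for
 * every possible 16-bit value.
 */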

#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>
#include <asm/kaiser.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)
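
/*
 * With 4 KiB pages that works out to 4096/64 = 64 ministacks per page.
 * The six words are, as used by the entry_64.S code, the five-word
 * hardware IRET frame (RIP, CS, RFLAGS, RSP, SS) plus one scratch slot.
 */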

/* How many espfix pages do we have address space for? */
#define ESPFIX_PAGE_SPACE	(1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more than one PGD for the ESPFIX hack"
#endif
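
/*
 * For scale: assuming 4-level paging (PGDIR_SHIFT = 39, PAGE_SHIFT =
 * 12), ESPFIX_PAGE_SPACE is 2^(39-12-16) = 2048 pages, so up to
 * 64 * 2048 = 131072 CPUs fit under a single PGD entry.
 */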

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation tracking - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific CPU.
 * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
 * we have to account for some amount of padding at the end of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}
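
/*
 * Worked example, taking slot_random = page_random = 0: CPU 1 is page
 * 0, slot 1, giving addr = 0x40, which the bit interleave leaves
 * alone; CPU 1024 is page 16, slot 0, giving addr = 0x10000, which
 * the interleave relocates to 0x100000000.  Bits 16-31 of the result
 * are always zero (ESPFIX_BASE_ADDR being suitably aligned), leaving
 * them free to select one of the 2^16 aliases.
 */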

#define PTE_STRIDE        (65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES PTRS_PER_PMD
#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
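
/*
 * Assuming 4 KiB pages and 512-entry tables: PTE_STRIDE = 16, so each
 * PTE page holds 512/16 = 32 clones, each PMD page 512, and 4 PUD
 * entries are cloned, i.e. 32 * 512 * 4 = 65536 = 2^16 aliases of
 * each ministack page, 64K apart, spanning 2^32 bytes.
 */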

#define PGTABLE_PROT	  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)

static void init_espfix_random(void)
{
	unsigned long rand;

	/*
	 * This runs before the entropy pools are initialized, but it
	 * is hopefully better than nothing.
	 */
	if (!arch_get_random_long(&rand)) {
		/* The constant is an arbitrary large prime */
		rand = rdtsc();
		rand *= 0xc345c6b72fd16123UL;
	}

	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}

void __init init_espfix_bsp(void)
{
	pgd_t *pgd_p;

	/* Install the espfix pud into the kernel page directory */
	pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
	/*
	 * Just copy the top-level PGD that is mapping the espfix
	 * area to ensure it is mapped into the shadow user page
	 * tables.
	 */
	if (kaiser_enabled) {
		set_pgd(native_get_shadow_pgd(pgd_p),
			__pgd(_KERNPG_TABLE | __pa((pud_t *)espfix_pud_page)));
	}

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}

void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
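		/* Point every cloned PUD slot at the one new PMD page */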
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
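		/* Likewise replicate the new PTE page across the PMD clones */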
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}

	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
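	/* Install the ministack at each 64K-spaced alias in this PTE page */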
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);

	/* Job is done for this CPU and any CPU which shares this page */
	ACCESS_ONCE(espfix_pages[page]) = stack_page;

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
	per_cpu(espfix_stack, cpu) = addr;
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
				      + (addr & ~PAGE_MASK);
}
220