// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

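	/*
	 * A full LDT is LDT_ENTRIES (8192) entries of LDT_ENTRY_SIZE (8)
	 * bytes each, i.e. 64 KiB at most, so the vzalloc() path below is
	 * only taken for LDTs larger than a single page.
	 */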
	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	bool is_vmalloc, had_top_level_entry;
	unsigned long va;
	spinlock_t *ptl;
	int i, nr_pages;
	pgd_t *pgd;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/*
	 * Did we already have the top level entry allocated?  We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);
	had_top_level_entry = (pgd->pgd != 0);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

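	/*
	 * Each mm has two LDT slots in the PTI user mapping (see the slot
	 * handling in write_ldt()); ldt_slot_va() returns the fixed virtual
	 * address of a slot.  Map every page of the new LDT into the chosen
	 * slot.
	 */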
	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy to find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	if (mm->context.ldt) {
		/*
		 * We already had an LDT.  The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI)) {
			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
		}
	}

	ldt->slot = slot;
#endif
	return 0;
}

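/*
 * Undo map_ldt_struct(): clear the PTEs covering this ldt_struct's slot
 * and flush the now-stale translations on all CPUs using this mm.
 */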
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
}

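/*
 * Free the page tables backing the PTI LDT slots.  The LDT area is not
 * covered by any VMA, so the generic exit_mmap() path will not free
 * these page tables; arch code has to do it explicitly.
 */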
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = start + (1UL << PGDIR_SHIFT);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
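	/*
	 * On bare metal this is a no-op; on Xen PV it maps the LDT pages
	 * read-only, which is why the entries must not change afterwards.
	 */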
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

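/*
 * Publish a new LDT and make every CPU currently running this mm reload
 * it.  The store-release below pairs with the READ_ONCE() in
 * load_mm_ldt(), so lockless readers never see half-initialized entries.
 */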
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

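/*
 * Counterpart of alloc_ldt_struct().  vfree_atomic() defers the actual
 * vfree() to a workqueue, so freeing a multi-page LDT is safe even from
 * contexts that cannot sleep.
 */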
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap().  Just copy the current LDT state;
 * the new task is not running yet, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

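/*
 * Hook invoked from the generic exit_mmap() path during address-space
 * teardown; see the comment above free_ldt_pgtables() for why this
 * cannot be left to the core VM.
 */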
void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

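/*
 * modify_ldt(func == 0): copy the current LDT to userspace.  If the
 * caller asked for more bytes than the LDT contains, the tail is
 * zero-filled so the syscall still returns the full bytecount.
 */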
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

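/*
 * modify_ldt(func == 2): the default LDT is empty, so just zero-fill the
 * user buffer and report how many bytes were "read".
 */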
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

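/*
 * modify_ldt(func == 1 or 0x11): install or clear a single LDT entry.
 * The LDT is never modified in place: a new ldt_struct is allocated, the
 * old entries are copied over, and install_ldt() publishes the result,
 * so lockless readers always see a consistent table.  Under PTI the new
 * table is mapped into whichever of the two slots is not currently live.
 */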
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

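/*
 * The modify_ldt(2) entry point.  An illustrative (untested) userspace
 * sketch, assuming glibc's syscall(2) and struct user_desc from
 * <asm/ldt.h>; "buf" and "out" are hypothetical caller-owned buffers:
 *
 *	struct user_desc ud = {
 *		.entry_number = 0,
 *		.base_addr    = (unsigned long)buf,
 *		.limit        = 0xfff,
 *		.seg_32bit    = 1,
 *		.useable      = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud));	// write one entry
 *	syscall(SYS_modify_ldt, 0, out, sizeof(out));	// read the LDT back
 */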
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'.  This cast gives us an int-sized value in %rax
	 * for the return code.  The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}