/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

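/*
 * Regset slot numbers.  64-bit has no separate user_fxsr regset (the
 * FXSR state is exported through REGSET_FP there), so the 64-bit
 * I/O-permission regset reuses the REGSET_XFP slot as REGSET_IOPERM64.
 */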
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.  If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs.  If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

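/*
 * arg_offs_table maps a function-argument index to its pt_regs slot:
 * the regparm(3) convention (ax, dx, cx) on 32-bit, and the System V
 * AMD64 convention (di, si, dx, cx, r8, r9) on 64-bit.
 */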
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};

/*
 * Does not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range.  In this
 * case we try to take the previous stack.  To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	struct thread_info *tinfo;

	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	tinfo = (struct thread_info *)context;
	if (tinfo->previous_esp)
		return tinfo->previous_esp;

	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);

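/*
 * 'regno' is a byte offset into struct user_regs_struct; each slot is
 * 4 bytes on 32-bit, so 'regno >> 2' indexes the matching pt_regs word.
 */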
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

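/*
 * Update one breakpoint slot from the len/type fields decoded out of a
 * dr7 write, enabling or disabling the backing perf event as requested.
 */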
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
			 struct task_struct *tsk, int disabled)
{
	int err;
	int gen_len, gen_type;
	struct perf_event_attr attr;

	/*
	 * We should have at least an inactive breakpoint at this
	 * slot.  It means the user is writing dr7 without having
	 * written the address register first.
	 */
	if (!bp)
		return -EINVAL;

	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
	if (err)
		return err;

	attr = bp->attr;
	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
	attr.disabled = disabled;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct perf_event *bp;

	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->ptrace_bps[i];

		if (!enabled) {
			if (bp) {
				/*
				 * Don't unregister the breakpoints right away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded.  This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;

				rc = ptrace_modify_breakpoint(bp, len, type,
							      tsk, 1);
				if (rc)
					break;
			}
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}

	ptrace_put_breakpoints(tsk);

	return ((orig_ret < 0) ? orig_ret : rc);
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp;

		if (ptrace_get_breakpoints(tsk) < 0)
			return -ESRCH;

		bp = thread->ptrace_bps[n];
		if (!bp)
			val = 0;
		else
			val = bp->hw.info.address;

		ptrace_put_breakpoints(tsk);
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct perf_event *bp;
	struct thread_struct *t = &tsk->thread;
	struct perf_event_attr attr;
	int err = 0;

	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	if (!t->ptrace_bps[nr]) {
		ptrace_breakpoint_init(&attr);
		/*
		 * Put stub len and type to register (reserve) an inactive
		 * but correct bp.
		 */
		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_1;
		attr.bp_type = HW_BREAKPOINT_W;
		attr.disabled = 1;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);

		/*
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr.  The new one will return -EINVAL
		 * in this case.
		 * -EINVAL may be what we want for in-kernel breakpoint users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user.  And anyway this is the previous
		 * behaviour.
		 */
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto put;
		}

		t->ptrace_bps[nr] = bp;
	} else {
		bp = t->ptrace_bps[nr];

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

put:
	ptrace_put_breakpoints(tsk);
	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc = 0;

	/* There are no DR4 or DR5 registers */
	if (n == 4 || n == 5)
		return -EIO;

	if (n == 6) {
		thread->debugreg6 = val;
		goto ret_path;
	}
	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
		if (rc)
			return rc;
	}
	/* All that's left is DR7 */
	if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}

ret_path:
	return rc;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/*
	 * The normal 64-bit interface to access TLS data.  Works just
	 * like arch_prctl, except that the arguments are reversed.
	 */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

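/*
 * R32() and SEG32() expand to case labels that map an offset in the
 * 32-bit 'struct user32' layout onto the matching 64-bit pt_regs
 * field or segment-register accessor.
 */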
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/*
	 * Read 32 bits at location addr in the USER area.  Only allow
	 * returning the lower 32 bits of segment and debug registers.
	 */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/*
	 * Write the word at location addr in the USER area.  Only allow
	 * updating segment and debug registers, with the upper 32 bits
	 * zero-extended.
	 */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

#ifdef CONFIG_X86_X32_ABI
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

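/*
 * Called once the xsave area size and feature mask are known, so the
 * variable-sized REGSET_XSTATE entries can report the right length.
 */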
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

static void fill_sigtrap_info(struct task_struct *tsk,
			      struct pt_regs *regs,
			      int error_code, int si_code,
			      struct siginfo *info)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = si_code;
	info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
			      struct pt_regs *regs,
			      struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	if (secure_computing(regs->orig_ax)) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1L;
		goto out;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (IS_IA32)
		audit_syscall_entry(AUDIT_ARCH_I386,
				    regs->orig_ax,
				    regs->bx, regs->cx,
				    regs->dx, regs->si);
#ifdef CONFIG_X86_64
	else
		audit_syscall_entry(AUDIT_ARCH_X86_64,
				    regs->orig_ax,
				    regs->di, regs->si,
				    regs->dx, regs->r10);
#endif

out:
	/* -1L if the syscall is to be skipped, else the syscall number. */
	return ret ?: regs->orig_ax;
}

void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
	       !test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}