/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* If FP has been used then the target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* FCSR has been preset by `mips_set_personality_nan'.  */

	/*
	 * Record that the target has "used" math, such that the context
	 * just initialised, and any modifications made by the caller,
	 * aren't discarded.
	 */
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Poke at FCSR according to its mask.  Set the Cause bits even
 * if a corresponding Enable bit is set.  This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written.  */

	return 0;
}

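/*
 * Read the FP context: 32 doubleword-wide FP registers (all ones if the
 * FPU has not been used yet), followed by the FCSR and FIR registers.
 */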
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

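/*
 * Write the FP context: 32 doubleword-wide FP registers followed by the
 * FCSR register.  The FIR register is read-only and is ignored.
 */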
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written.  */

	return 0;
}

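/*
 * Copy the hardware watchpoint state (watch style, watchlo/watchhi
 * pairs and the per-register masks) out to the tracer's buffer.
 */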
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

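/*
 * Validate and install the watchpoint registers supplied by the tracer.
 * TIF_LOAD_WATCH is set only if at least one watchpoint is active.
 */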
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

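/*
 * Copy the 32-bit view of the general registers, dumped from the
 * tracee's pt_regs, out to a regset buffer.
 */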
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

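/*
 * Update the tracee's general registers from a 32-bit regset buffer.
 * k0/k1 and the privileged cp0 registers other than EPC are left
 * untouched.
 */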
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

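/*
 * Copy the 64-bit view of the general registers, dumped from the
 * tracee's pt_regs, out to a regset buffer.
 */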
static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

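/*
 * Update the tracee's general registers from a 64-bit regset buffer.
 * k0/k1 and the privileged cp0 registers other than EPC are left
 * untouched.
 */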
static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	return 0;
}

#endif /* CONFIG_64BIT */

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 * correspond 1:1 to buffer slots.  Only general registers are copied.
 */
static int fpr_get_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       void **kbuf, void __user **ubuf)
{
	return user_regset_copyout(pos, count, kbuf, ubuf,
				   &target->thread.fpu,
				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots.  Only general
 * registers are copied.
 */
static int fpr_get_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       void **kbuf, void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		err = user_regset_copyout(pos, count, kbuf, ubuf,
					  &fpr_val, i * sizeof(elf_fpreg_t),
					  (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
	}

	return 0;
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	int err;

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcr31,
				  fcr31_pos, fcr31_pos + sizeof(u32));
	if (err)
		return err;

	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &boot_cpu_data.fpu_id,
				  fir_pos, fir_pos + sizeof(u32));

	return err;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
 * context's general register slots.  Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 * bits only of FP context's general register slots.  Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.  Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0)
		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						fir_pos,
						fir_pos + sizeof(u32));

	return err;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};

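/*
 * Table mapping register names to their offsets within struct pt_regs,
 * used by regs_query_register_offset() below.
 */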
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {					\
	.name = #reg,							\
	.offset = offsetof(struct pt_regs, r)				\
}

#define REG_OFFSET_END {						\
	.name = NULL,							\
	.offset = 0							\
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.get		= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.get		= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */

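/*
 * Pick the regset view that matches the tracee's ABI: o32, n32 or n64
 * on 64-bit kernels, or the plain 32-bit view on 32-bit kernels.
 */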
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}

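/*
 * Handle the MIPS-specific ptrace requests; anything not recognised
 * here is passed on to the generic ptrace_request().
 */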
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		union fpureg *fregs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

/*
 * Notification of system call entry
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		return -1;

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch();
		syscall_get_arguments(current, regs, 0, 6, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}

/*
 * Notification of system call exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}