• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1992 Ross Biro
7  * Copyright (C) Linus Torvalds
8  * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9  * Copyright (C) 1996 David S. Miller
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 1999 MIPS Technologies, Inc.
12  * Copyright (C) 2000 Ulf Carlsson
13  *
14  * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
15  * binaries.
16  */
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/errno.h>
24 #include <linux/ptrace.h>
25 #include <linux/regset.h>
26 #include <linux/smp.h>
27 #include <linux/security.h>
28 #include <linux/tracehook.h>
29 #include <linux/audit.h>
30 #include <linux/seccomp.h>
31 #include <linux/ftrace.h>
32 
33 #include <asm/byteorder.h>
34 #include <asm/cpu.h>
35 #include <asm/cpu-info.h>
36 #include <asm/dsp.h>
37 #include <asm/fpu.h>
38 #include <asm/mipsregs.h>
39 #include <asm/mipsmtregs.h>
40 #include <asm/pgtable.h>
41 #include <asm/page.h>
42 #include <asm/syscall.h>
43 #include <asm/uaccess.h>
44 #include <asm/bootinfo.h>
45 #include <asm/reg.h>
46 
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/syscalls.h>
49 
init_fp_ctx(struct task_struct * target)50 static void init_fp_ctx(struct task_struct *target)
51 {
52 	/* If FP has been used then the target already has context */
53 	if (tsk_used_math(target))
54 		return;
55 
56 	/* Begin with data registers set to all 1s... */
57 	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
58 
59 	/* ...and FCSR zeroed */
60 	target->thread.fpu.fcr31 = 0;
61 
62 	/*
63 	 * Record that the target has "used" math, such that the context
64 	 * just initialised, and any modifications made by the caller,
65 	 * aren't discarded.
66 	 */
67 	set_stopped_child_used_math(target);
68 }
69 
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.  On MIPS the only
 * per-task debug state armed by this file is the watch-register
 * load flag, so that is all that needs clearing here.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
80 
/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/*
	 * 38 64-bit slots: 32 GPRs plus lo, hi, epc, badvaddr, status
	 * and cause.  Validate the whole destination once so the
	 * __put_user() calls below can skip their own checks.
	 */
	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	/* The (long) cast sign extends each value on 32-bit kernels. */
	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}
107 
/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/* 38 64-bit slots, matching the PTRACE_GETREGS layout. */
	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written.  */

	return 0;
}
133 
/*
 * Copy the child's FP context to userland in the PTRACE_GETFPREGS
 * layout: 32 x 64-bit FP data registers, then FCSR and FIR.
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	/* 33 * 8 bytes: 32 u64 registers plus the two u32 control words. */
	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		/* FP never used: report every data register as all 1s. */
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	/* data is __u32 *, so +64/+65 land just after the 32 u64 slots. */
	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}
156 
/*
 * Write the child's FP context from a userland buffer in the
 * PTRACE_SETFPREGS layout: 32 x 64-bit FP data registers, then FCSR.
 * FIR is read-only and ignored.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 fcr31;
	u32 value;
	u32 mask;
	int i;

	/* 33 * 8 bytes: 32 u64 registers plus the two u32 control words. */
	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	/* Make sure the child owns an FP context before writing to it. */
	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	/*
	 * Merge the user-supplied FCSR with the current one: bits set in
	 * fpu_msk31 are kept from the existing value, only the remaining
	 * bits are taken from userland.
	 */
	__get_user(value, data + 64);
	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);

	/* FIR may not be written.  */

	return 0;
}
186 
/*
 * Copy the child's watch-register state to userland for
 * PTRACE_GET_WATCH_REGS.  Unimplemented slots are zero filled.
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	/* No watch registers implemented or none configured for use. */
	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/*
	 * WATCH_STYLE selects which member of the pt_watch_regs union
	 * the accesses below go through, matching the reported style.
	 */
#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		/* Only the low 12 bits of watchhi are exposed. */
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	/* Zero out the slots beyond what this CPU implements. */
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
225 
/*
 * Install new watch-register state from userland for
 * PTRACE_SET_WATCH_REGS.  All values are validated before any of
 * them is written, so the call either fully succeeds or changes
 * nothing.
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];	/* candidate watchlo values */
	u16 ht[NUM_WATCH_REGS];			/* candidate watchhi values */

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
		/* Reject watch addresses outside the user address range. */
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			/* 32-bit-address task: address must fit in 31 bits. */
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		/* Only bits 11:3 (the byte mask) may be set in watchhi. */
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		/* Any of the low 3 watchlo bits arms this watchpoint. */
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/*
		 * NOTE(review): the original comment here claimed "Set
		 * the G bit", but only the user-supplied bits are stored;
		 * confirm where (if anywhere) the G bit is handled.
		 */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	/* Only load watch registers on context switch if any are armed. */
	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
273 
274 /* regset get/set implementations */
275 
276 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
277 
/*
 * Fill an NT_PRSTATUS regset buffer with the 32-bit view of the
 * target's general registers.
 */
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};
	unsigned int reg;

	/* Gather the GPRs; k0/k1 ($26/$27) stay reported as zero. */
	for (reg = MIPS32_EF_R1; reg <= MIPS32_EF_R31; reg++)
		if (reg != MIPS32_EF_R26 && reg != MIPS32_EF_R27)
			uregs[reg] = regs->regs[reg - MIPS32_EF_R0];

	/* Special registers follow the GPRs in the ELF core layout. */
	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}
305 
/*
 * Write the target's general registers from an NT_PRSTATUS-layout
 * buffer of 32-bit values.  Values are sign extended on 64-bit
 * kernels; k0/k1 and the read-only CP0 slots (badvaddr, status,
 * cause) are silently ignored.
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	/*
	 * user_regset_copyin() landed the data in uregs[start] through
	 * uregs[start + num_regs - 1], so iterate over exactly that
	 * window.  (The previous upper bound of `num_regs' dropped the
	 * tail of the window whenever start != 0.)
	 */
	for (i = start; i < start + num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	return 0;
}
352 
353 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
354 
355 #ifdef CONFIG_64BIT
356 
/*
 * Fill an NT_PRSTATUS regset buffer with the 64-bit view of the
 * target's general registers.
 */
static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};
	unsigned int reg;

	/* Gather the GPRs; k0/k1 ($26/$27) stay reported as zero. */
	for (reg = MIPS64_EF_R1; reg <= MIPS64_EF_R31; reg++)
		if (reg != MIPS64_EF_R26 && reg != MIPS64_EF_R27)
			uregs[reg] = regs->regs[reg - MIPS64_EF_R0];

	/* Special registers follow the GPRs in the ELF core layout. */
	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}
384 
/*
 * Write the target's general registers from an NT_PRSTATUS-layout
 * buffer of 64-bit values.  k0/k1 and the read-only CP0 slots
 * (badvaddr, status, cause) are silently ignored.
 */
static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	/*
	 * user_regset_copyin() landed the data in uregs[start] through
	 * uregs[start + num_regs - 1], so iterate over exactly that
	 * window.  (The previous upper bound of `num_regs' dropped the
	 * tail of the window whenever start != 0.)
	 */
	for (i = start; i < start + num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	return 0;
}
427 
428 #endif /* CONFIG_64BIT */
429 
/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 * correspond 1:1 to buffer slots.  Only general registers are copied.
 */
static int fpr_get_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       void **kbuf, void __user **ubuf)
{
	/*
	 * The register array sits at the start of thread.fpu with the
	 * same layout as the buffer, so it can be copied wholesale.
	 */
	return user_regset_copyout(pos, count, kbuf, ubuf,
				   &target->thread.fpu,
				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
443 
/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots.  Only general
 * registers are copied.
 */
static int fpr_get_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       void **kbuf, void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	/* A buffer slot for one FP register must be exactly 64 bits. */
	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		/* Extract the 64-bit view of the wider register slot. */
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		err = user_regset_copyout(pos, count, kbuf, ubuf,
					  &fpr_val, i * sizeof(elf_fpreg_t),
					  (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
	}

	return 0;
}
470 
471 /*
472  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
473  * Choose the appropriate helper for general registers, and then copy
474  * the FCSR register separately.
475  */
fpr_get(struct task_struct * target,const struct user_regset * regset,unsigned int pos,unsigned int count,void * kbuf,void __user * ubuf)476 static int fpr_get(struct task_struct *target,
477 		   const struct user_regset *regset,
478 		   unsigned int pos, unsigned int count,
479 		   void *kbuf, void __user *ubuf)
480 {
481 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
482 	int err;
483 
484 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
485 		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
486 	else
487 		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
488 	if (err)
489 		return err;
490 
491 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
492 				  &target->thread.fpu.fcr31,
493 				  fcr31_pos, fcr31_pos + sizeof(u32));
494 
495 	return err;
496 }
497 
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 * context's general register slots.  Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	/*
	 * The register array sits at the start of thread.fpu with the
	 * same layout as the buffer, so it can be written wholesale.
	 */
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
511 
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 * bits only of FP context's general register slots.  Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	/* A buffer slot for one FP register must be exactly 64 bits. */
	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	/* Stop as soon as the supplied buffer is exhausted. */
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		/* Only the lower 64 bits of the register slot are set. */
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
538 
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	u32 fcr31;
	int err;

	/* Partial register writes are a caller bug; see above. */
	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the target owns an FP context before writing to it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* If the buffer extends past the registers it also carries FCSR. */
	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		/* FPU_CSR_ALL_X bits may not be set from userland. */
		target->thread.fpu.fcr31 = fcr31 & ~FPU_CSR_ALL_X;
	}

	return err;
}
585 
/* Index of each regset within the user_regset arrays below. */
enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};
590 
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/* o32 register sets: 32-bit GPR view plus the common FP view. */
static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.get		= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

/* Regset view exposed for o32 tasks (and all 32-bit kernels). */
static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
621 
#ifdef CONFIG_64BIT

/* n64 register sets: 64-bit GPR view plus the common FP view. */
static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.get		= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

/* Regset view exposed for native 64-bit tasks. */
static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

/* n32 tasks share the 64-bit regsets but carry the ABI2 ELF flag. */
static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */
665 
/*
 * Pick the user_regset_view matching the ABI of @task: the o32 view
 * on 32-bit kernels or for 32-bit-regs tasks, the n32 view for
 * 32-bit-address tasks on 64-bit kernels, the full 64-bit view
 * otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	/* o32 task running on a 64-bit kernel. */
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	/* n32 task: 64-bit registers with 32-bit addressing. */
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}
682 
/*
 * Architecture-specific ptrace(2) request dispatcher.  Handles the
 * MIPS-specific requests and the PEEK/POKE USER-area layout; anything
 * unrecognised falls through to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	/* Views of `addr' and `data' as user pointers for the cases below. */
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		union fpureg *fregs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			/* General purpose registers $0..$31. */
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	/* Write the word at location addr in the USER area. */
	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			/*
			 * NOTE(review): fregs is fetched before
			 * init_fp_ctx() runs; confirm get_fpu_regs()
			 * does not depend on the context being
			 * initialised first.
			 */
			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			/* Exception-cause bits may not be set from userland. */
			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
		}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
902 
/*
 * Notification of system call entry
 * - triggered by current->work.syscall_trace
 *
 * Returns the syscall number to execute, or -1 when seccomp denies
 * the call.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	long ret = 0;
	/* We may arrive with RCU in user mode; switch to kernel mode. */
	user_exit();

	/* Record the syscall number for syscall_get_nr() et al. */
	current_thread_info()->syscall = syscall;

	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	/* Syscall arguments a0..a3 live in $4..$7 at entry. */
	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
	/*
	 * NOTE(review): `ret' (-1 from the tracer) is computed but never
	 * returned, and `syscall' is returned as originally passed even
	 * if the tracer rewrote the syscall number -- confirm whether
	 * this is the intended behaviour.
	 */
	return syscall;
}
928 
/*
 * Notification of system call exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	/* regs_return_value() extracts the result the syscall left. */
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/* Back to RCU user mode before returning to userland. */
	user_enter();
}
952