1 /*
2  *  Copyright (C) 1995  Linus Torvalds
3  *
4  *  Pentium III FXSR, SSE support
5  *	Gareth Hughes <gareth@valinux.com>, May 2000
6  *
7  *  X86-64 port
8  *	Andi Kleen.
9  *
10  *	CPU hotplug support - ashok.raj@intel.com
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of process handling.
15  */
16 
17 #include <linux/cpu.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/fs.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/elfcore.h>
24 #include <linux/smp.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/module.h>
30 #include <linux/ptrace.h>
31 #include <linux/notifier.h>
32 #include <linux/kprobes.h>
33 #include <linux/kdebug.h>
34 #include <linux/prctl.h>
35 #include <linux/uaccess.h>
36 #include <linux/io.h>
37 #include <linux/ftrace.h>
38 
39 #include <asm/pgtable.h>
40 #include <asm/processor.h>
41 #include <asm/fpu/internal.h>
42 #include <asm/mmu_context.h>
43 #include <asm/prctl.h>
44 #include <asm/desc.h>
45 #include <asm/proto.h>
46 #include <asm/ia32.h>
47 #include <asm/idle.h>
48 #include <asm/syscalls.h>
49 #include <asm/debugreg.h>
50 #include <asm/switch_to.h>
51 #include <asm/xen/hypervisor.h>
52 
53 #include "process.h"
54 
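/*
 * Assembly entry point (entry_64.S) at which a newly forked task starts
 * executing the first time the scheduler switches to it.
 */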
55 asmlinkage extern void ret_from_fork(void);
56 
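/*
 * Per-cpu scratch slot used by the 64-bit SYSCALL entry code to stash the
 * user stack pointer while it switches to the kernel stack.
 */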
57 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
58 
59 /* Also prints some state that isn't saved in pt_regs */
60 void __show_regs(struct pt_regs *regs, int all)
61 {
62 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
63 	unsigned long d0, d1, d2, d3, d6, d7;
64 	unsigned int fsindex, gsindex;
65 	unsigned int ds, cs, es;
66 
67 	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
68 	printk_address(regs->ip);
69 	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
70 			regs->sp, regs->flags);
71 	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
72 	       regs->ax, regs->bx, regs->cx);
73 	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
74 	       regs->dx, regs->si, regs->di);
75 	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
76 	       regs->bp, regs->r8, regs->r9);
77 	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
78 	       regs->r10, regs->r11, regs->r12);
79 	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
80 	       regs->r13, regs->r14, regs->r15);
81 
82 	asm("movl %%ds,%0" : "=r" (ds));
83 	asm("movl %%cs,%0" : "=r" (cs));
84 	asm("movl %%es,%0" : "=r" (es));
85 	asm("movl %%fs,%0" : "=r" (fsindex));
86 	asm("movl %%gs,%0" : "=r" (gsindex));
87 
88 	rdmsrl(MSR_FS_BASE, fs);
89 	rdmsrl(MSR_GS_BASE, gs);
90 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
91 
92 	if (!all)
93 		return;
94 
95 	cr0 = read_cr0();
96 	cr2 = read_cr2();
97 	cr3 = read_cr3();
98 	cr4 = __read_cr4();
99 
100 	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
101 	       fs, fsindex, gs, gsindex, shadowgs);
102 	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
103 			es, cr0);
104 	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
105 			cr4);
106 
107 	get_debugreg(d0, 0);
108 	get_debugreg(d1, 1);
109 	get_debugreg(d2, 2);
110 	get_debugreg(d3, 3);
111 	get_debugreg(d6, 6);
112 	get_debugreg(d7, 7);
113 
114 	/* Only print out debug registers if they are in their non-default state. */
115 	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
116 	    (d6 == DR6_RESERVED) && (d7 == 0x400))
117 		return;
118 
119 	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
120 	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
121 
122 }
123 
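/*
 * Final architecture-specific cleanup when a task is released: a dead task
 * must no longer own an LDT at this point, so warn and BUG() if it does.
 */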
124 void release_thread(struct task_struct *dead_task)
125 {
126 	if (dead_task->mm) {
127 #ifdef CONFIG_MODIFY_LDT_SYSCALL
128 		if (dead_task->mm->context.ldt) {
129 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
130 				dead_task->comm,
131 				dead_task->mm->context.ldt->entries,
132 				dead_task->mm->context.ldt->size);
133 			BUG();
134 		}
135 #endif
136 	}
137 }
138 
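/*
 * Program one of the task's GDT TLS slots with a flat 32-bit descriptor
 * (4G limit, page granularity) based at @addr.  Used below to implement
 * small FS/GS bases via the GDT rather than the base MSRs.
 */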
139 static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
140 {
141 	struct user_desc ud = {
142 		.base_addr = addr,
143 		.limit = 0xfffff,
144 		.seg_32bit = 1,
145 		.limit_in_pages = 1,
146 		.useable = 1,
147 	};
148 	struct desc_struct *desc = t->thread.tls_array;
149 	desc += tls;
150 	fill_ldt(desc, &ud);
151 }
152 
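/* Return the base address currently programmed into the given TLS slot. */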
153 static inline u32 read_32bit_tls(struct task_struct *t, int tls)
154 {
155 	return get_desc_base(&t->thread.tls_array[tls]);
156 }
157 
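/*
 * Architecture-specific part of fork()/clone(): set up the child's kernel
 * stack and pt_regs, inherit the parent's segment state, duplicate the io
 * permission bitmap if one exists, and install a new TLS when CLONE_SETTLS
 * is passed.  Kernel threads instead get a synthetic frame so that
 * ret_from_fork calls the function passed in @sp with @arg as its argument.
 */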
158 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
159 		unsigned long arg, struct task_struct *p, unsigned long tls)
160 {
161 	int err;
162 	struct pt_regs *childregs;
163 	struct task_struct *me = current;
164 
165 	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
166 	childregs = task_pt_regs(p);
167 	p->thread.sp = (unsigned long) childregs;
168 	set_tsk_thread_flag(p, TIF_FORK);
169 	p->thread.io_bitmap_ptr = NULL;
170 
171 	savesegment(gs, p->thread.gsindex);
172 	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
173 	savesegment(fs, p->thread.fsindex);
174 	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
175 	savesegment(es, p->thread.es);
176 	savesegment(ds, p->thread.ds);
177 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
178 
179 	if (unlikely(p->flags & PF_KTHREAD)) {
180 		/* kernel thread */
181 		memset(childregs, 0, sizeof(struct pt_regs));
182 		childregs->sp = (unsigned long)childregs;
183 		childregs->ss = __KERNEL_DS;
184 		childregs->bx = sp; /* function */
185 		childregs->bp = arg;
186 		childregs->orig_ax = -1;
187 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
188 		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
189 		return 0;
190 	}
191 	*childregs = *current_pt_regs();
192 
193 	childregs->ax = 0;
194 	if (sp)
195 		childregs->sp = sp;
196 
197 	err = -ENOMEM;
198 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
199 		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
200 						  IO_BITMAP_BYTES, GFP_KERNEL);
201 		if (!p->thread.io_bitmap_ptr) {
202 			p->thread.io_bitmap_max = 0;
203 			return -ENOMEM;
204 		}
205 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
206 	}
207 
208 	/*
209 	 * Set a new TLS for the child thread?
210 	 */
211 	if (clone_flags & CLONE_SETTLS) {
212 #ifdef CONFIG_IA32_EMULATION
213 		if (is_ia32_task())
214 			err = do_set_thread_area(p, -1,
215 				(struct user_desc __user *)tls, 0);
216 		else
217 #endif
218 			err = do_arch_prctl(p, ARCH_SET_FS, tls);
219 		if (err)
220 			goto out;
221 	}
222 	err = 0;
223 out:
224 	if (err && p->thread.io_bitmap_ptr) {
225 		kfree(p->thread.io_bitmap_ptr);
226 		p->thread.io_bitmap_max = 0;
227 	}
228 
229 	return err;
230 }
231 
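/*
 * Common helper for starting a freshly exec'ed task: clear FS/GS, load the
 * new data selectors, and point the saved pt_regs at the new entry point
 * and stack.  force_iret() makes the return to user mode take the IRET
 * path so the new segment and flag values take effect.
 */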
232 static void
233 start_thread_common(struct pt_regs *regs, unsigned long new_ip,
234 		    unsigned long new_sp,
235 		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
236 {
237 	loadsegment(fs, 0);
238 	loadsegment(es, _ds);
239 	loadsegment(ds, _ds);
240 	load_gs_index(0);
241 	regs->ip		= new_ip;
242 	regs->sp		= new_sp;
243 	regs->cs		= _cs;
244 	regs->ss		= _ss;
245 	regs->flags		= X86_EFLAGS_IF;
246 	force_iret();
247 }
248 
249 void
250 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
251 {
252 	start_thread_common(regs, new_ip, new_sp,
253 			    __USER_CS, __USER_DS, 0);
254 }
255 EXPORT_SYMBOL_GPL(start_thread);
256 
257 #ifdef CONFIG_COMPAT
258 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
259 {
260 	start_thread_common(regs, new_ip, new_sp,
261 			    test_thread_flag(TIF_X32)
262 			    ? __USER_CS : __USER32_CS,
263 			    __USER_DS, __USER_DS);
264 }
265 #endif
266 
267 /*
268  *	switch_to(x,y) should switch tasks from x to y.
269  *
270  * This could still be optimized:
271  * - fold all the options into a flag word and test it with a single test.
272  * - could test fs/gs bitsliced
273  *
274  * Kprobes not supported here. Set the probe on schedule instead.
275  * Function graph tracer not supported too.
276  */
277 __visible __notrace_funcgraph struct task_struct *
278 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
279 {
280 	struct thread_struct *prev = &prev_p->thread;
281 	struct thread_struct *next = &next_p->thread;
282 	struct fpu *prev_fpu = &prev->fpu;
283 	struct fpu *next_fpu = &next->fpu;
284 	int cpu = smp_processor_id();
285 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
286 	unsigned fsindex, gsindex;
287 	fpu_switch_t fpu_switch;
288 
289 	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
290 
291 	/* We must save %fs and %gs before load_TLS() because
292 	 * %fs and %gs may be cleared by load_TLS().
293 	 *
294 	 * (e.g. xen_load_tls())
295 	 */
296 	savesegment(fs, fsindex);
297 	savesegment(gs, gsindex);
298 
299 	/*
300 	 * Load TLS before restoring any segments so that segment loads
301 	 * reference the correct GDT entries.
302 	 */
303 	load_TLS(next, cpu);
304 
305 	/*
306 	 * Leave lazy mode, flushing any hypercalls made here.  This
307 	 * must be done after loading TLS entries in the GDT but before
308 	 * loading segments that might reference them, and it must
309 	 * be done before fpu__restore(), so the TS bit is up to
310 	 * date.
311 	 */
312 	arch_end_context_switch(next_p);
313 
314 	/* Switch DS and ES.
315 	 *
316 	 * Reading them only returns the selectors, but writing them (if
317 	 * nonzero) loads the full descriptor from the GDT or LDT.  The
318 	 * LDT for next is loaded in switch_mm, and the GDT is loaded
319 	 * above.
320 	 *
321 	 * We therefore need to write new values to the segment
322 	 * registers on every context switch unless both the new and old
323 	 * values are zero.
324 	 *
325 	 * Note that we don't need to do anything for CS and SS, as
326 	 * those are saved and restored as part of pt_regs.
327 	 */
328 	savesegment(es, prev->es);
329 	if (unlikely(next->es | prev->es))
330 		loadsegment(es, next->es);
331 
332 	savesegment(ds, prev->ds);
333 	if (unlikely(next->ds | prev->ds))
334 		loadsegment(ds, next->ds);
335 
336 	/*
337 	 * Switch FS and GS.
338 	 *
339 	 * These are even more complicated than DS and ES: they have
340 	 * 64-bit bases that are controlled by arch_prctl.  Those bases
341 	 * only differ from the values in the GDT or LDT if the selector
342 	 * is 0.
343 	 *
344 	 * Loading the segment register resets the hidden base part of
345 	 * the register to 0 or the value from the GDT / LDT.  If the
346 	 * next base address is zero, writing 0 to the segment register is
347 	 * much faster than using wrmsr to explicitly zero the base.
348 	 *
349 	 * The thread_struct.fsbase and thread_struct.gsbase values are 0
350 	 * if the fs and gs bases respectively are not overridden
351 	 * from the values implied by fsindex and gsindex.  If the
352 	 * bases are overridden, they are nonzero and store the
353 	 * overridden base addresses.
354 	 *
355 	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
356 	 * be impossible.
357 	 *
358 	 * Therefore we need to reload the segment registers if either
359 	 * the old or new selector is nonzero, and we need to override
360 	 * the base address if next thread expects it to be overridden.
361 	 *
362 	 * This code is unnecessarily slow in the case where the old and
363 	 * new indexes are zero and the new base is nonzero -- it will
364 	 * unnecessarily write 0 to the selector before writing the new
365 	 * base address.
366 	 *
367 	 * Note: This all depends on arch_prctl being the only way that
368 	 * user code can override the segment base.  Once wrfsbase and
369 	 * wrgsbase are enabled, most of this code will need to change.
370 	 */
371 	if (unlikely(fsindex | next->fsindex | prev->fsbase)) {
372 		loadsegment(fs, next->fsindex);
373 
374 		/*
375 		 * If user code wrote a nonzero value to FS, then it also
376 		 * cleared the overridden base address.
377 		 *
378 		 * XXX: if user code wrote 0 to FS and cleared the base
379 		 * address itself, we won't notice and we'll incorrectly
380 		 * restore the prior base address next time we reschedule
381 		 * the process.
382 		 */
383 		if (fsindex)
384 			prev->fsbase = 0;
385 	}
386 	if (next->fsbase)
387 		wrmsrl(MSR_FS_BASE, next->fsbase);
388 	prev->fsindex = fsindex;
389 
390 	if (unlikely(gsindex | next->gsindex | prev->gsbase)) {
391 		load_gs_index(next->gsindex);
392 
393 		/* This works (and fails) the same way as fsindex above. */
394 		if (gsindex)
395 			prev->gsbase = 0;
396 	}
397 	if (next->gsbase)
398 		wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
399 	prev->gsindex = gsindex;
400 
401 	switch_fpu_finish(next_fpu, fpu_switch);
402 
403 	/*
404 	 * Switch the PDA and FPU contexts.
405 	 */
406 	this_cpu_write(current_task, next_p);
407 
408 	/* Reload esp0 and ss1.  This changes current_thread_info(). */
409 	load_sp0(tss, next);
410 
411 	switch_to_extra(prev_p, next_p);
412 
413 #ifdef CONFIG_XEN
414 	/*
415 	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
416 	 * current_pt_regs()->flags may not match the current task's
417 	 * intended IOPL.  We need to switch it manually.
418 	 */
419 	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
420 		     prev->iopl != next->iopl))
421 		xen_set_iopl_mask(next->iopl);
422 #endif
423 
424 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
425 		/*
426 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
427 		 * does not update the cached descriptor.  As a result, if we
428 		 * do SYSRET while SS is NULL, we'll end up in user mode with
429 		 * SS apparently equal to __USER_DS but actually unusable.
430 		 *
431 		 * The straightforward workaround would be to fix it up just
432 		 * before SYSRET, but that would slow down the system call
433 		 * fast paths.  Instead, we ensure that SS is never NULL in
434 		 * system call context.  We do this by replacing NULL SS
435 		 * selectors at every context switch.  SYSCALL sets up a valid
436 		 * SS, so the only way to get NULL is to re-enter the kernel
437 		 * from CPL 3 through an interrupt.  Since that can't happen
438 		 * in the same task as a running syscall, we are guaranteed to
439 		 * context switch between every interrupt vector entry and a
440 		 * subsequent SYSRET.
441 		 *
442 		 * We read SS first because SS reads are much faster than
443 		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
444 		 * it previously had a different non-NULL value.
445 		 */
446 		unsigned short ss_sel;
447 		savesegment(ss, ss_sel);
448 		if (ss_sel != __KERNEL_DS)
449 			loadsegment(ss, __KERNEL_DS);
450 	}
451 
452 	return prev_p;
453 }
454 
455 void set_personality_64bit(void)
456 {
457 	/* inherit personality from parent */
458 
459 	/* Make sure to be in 64bit mode */
460 	clear_thread_flag(TIF_IA32);
461 	clear_thread_flag(TIF_ADDR32);
462 	clear_thread_flag(TIF_X32);
463 
464 	/* Ensure the corresponding mm is not marked. */
465 	if (current->mm)
466 		current->mm->context.ia32_compat = 0;
467 
468 	/* TBD: this overwrites the user's setup. Should have two bits.
469 	   But 64-bit processes have always behaved this way,
470 	   so it's not too bad. The main problem is just that
471 	   32-bit children are affected again. */
472 	current->personality &= ~READ_IMPLIES_EXEC;
473 }
474 
475 void set_personality_ia32(bool x32)
476 {
477 	/* inherit personality from parent */
478 
479 	/* Make sure to be in 32bit mode */
480 	set_thread_flag(TIF_ADDR32);
481 
482 	/* Mark the associated mm as containing 32-bit tasks. */
483 	if (x32) {
484 		clear_thread_flag(TIF_IA32);
485 		set_thread_flag(TIF_X32);
486 		if (current->mm)
487 			current->mm->context.ia32_compat = TIF_X32;
488 		current->personality &= ~READ_IMPLIES_EXEC;
489 		/* is_compat_task() uses the presence of the x32
490 		   syscall bit flag to determine compat status */
491 		current_thread_info()->status &= ~TS_COMPAT;
492 	} else {
493 		set_thread_flag(TIF_IA32);
494 		clear_thread_flag(TIF_X32);
495 		if (current->mm)
496 			current->mm->context.ia32_compat = TIF_IA32;
497 		current->personality |= force_personality32;
498 		/* Prepare the first "return" to user space */
499 		current_thread_info()->status |= TS_COMPAT;
500 	}
501 }
502 EXPORT_SYMBOL_GPL(set_personality_ia32);
503 
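/*
 * Set or read a task's FS/GS base.  Small bases (<= 4G) are installed via a
 * GDT TLS slot because reloading a selector is cheaper than a wrmsr; larger
 * bases are written to MSR_FS_BASE / MSR_KERNEL_GS_BASE with the selector
 * cleared.  ARCH_GET_FS/ARCH_GET_GS copy the current base to the user
 * pointer passed in @addr.
 *
 * Illustrative user-space caller (a threading library installing its TLS
 * block; "tcb" is just an example variable):
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tcb);
 */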
504 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
505 {
506 	int ret = 0;
507 	int doit = task == current;
508 	int cpu;
509 
510 	switch (code) {
511 	case ARCH_SET_GS:
512 		if (addr >= TASK_SIZE_OF(task))
513 			return -EPERM;
514 		cpu = get_cpu();
515 		/* handle small bases via the GDT because that's faster to
516 		   switch. */
517 		if (addr <= 0xffffffff) {
518 			set_32bit_tls(task, GS_TLS, addr);
519 			if (doit) {
520 				load_TLS(&task->thread, cpu);
521 				load_gs_index(GS_TLS_SEL);
522 			}
523 			task->thread.gsindex = GS_TLS_SEL;
524 			task->thread.gsbase = 0;
525 		} else {
526 			task->thread.gsindex = 0;
527 			task->thread.gsbase = addr;
528 			if (doit) {
529 				load_gs_index(0);
530 				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
531 			}
532 		}
533 		put_cpu();
534 		break;
535 	case ARCH_SET_FS:
536 		/* Not strictly needed for fs, but do it for symmetry
537 		   with gs */
538 		if (addr >= TASK_SIZE_OF(task))
539 			return -EPERM;
540 		cpu = get_cpu();
541 		/* handle small bases via the GDT because that's faster to
542 		   switch. */
543 		if (addr <= 0xffffffff) {
544 			set_32bit_tls(task, FS_TLS, addr);
545 			if (doit) {
546 				load_TLS(&task->thread, cpu);
547 				loadsegment(fs, FS_TLS_SEL);
548 			}
549 			task->thread.fsindex = FS_TLS_SEL;
550 			task->thread.fsbase = 0;
551 		} else {
552 			task->thread.fsindex = 0;
553 			task->thread.fsbase = addr;
554 			if (doit) {
555 				/* set the selector to 0 to not confuse
556 				   __switch_to */
557 				loadsegment(fs, 0);
558 				ret = wrmsrl_safe(MSR_FS_BASE, addr);
559 			}
560 		}
561 		put_cpu();
562 		break;
563 	case ARCH_GET_FS: {
564 		unsigned long base;
565 		if (task->thread.fsindex == FS_TLS_SEL)
566 			base = read_32bit_tls(task, FS_TLS);
567 		else if (doit)
568 			rdmsrl(MSR_FS_BASE, base);
569 		else
570 			base = task->thread.fsbase;
571 		ret = put_user(base, (unsigned long __user *)addr);
572 		break;
573 	}
574 	case ARCH_GET_GS: {
575 		unsigned long base;
576 		unsigned gsindex;
577 		if (task->thread.gsindex == GS_TLS_SEL)
578 			base = read_32bit_tls(task, GS_TLS);
579 		else if (doit) {
580 			savesegment(gs, gsindex);
581 			if (gsindex)
582 				rdmsrl(MSR_KERNEL_GS_BASE, base);
583 			else
584 				base = task->thread.gsbase;
585 		} else
586 			base = task->thread.gsbase;
587 		ret = put_user(base, (unsigned long __user *)addr);
588 		break;
589 	}
590 
591 	default:
592 		ret = -EINVAL;
593 		break;
594 	}
595 
596 	return ret;
597 }
598 
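/* arch_prctl() system call entry point: operates on the calling task. */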
599 long sys_arch_prctl(int code, unsigned long addr)
600 {
601 	return do_arch_prctl(current, code, addr);
602 }
603 
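/*
 * Return the task's user stack pointer as saved in its pt_regs at the last
 * entry into the kernel.
 */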
604 unsigned long KSTK_ESP(struct task_struct *task)
605 {
606 	return task_pt_regs(task)->sp;
607 }
608