/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

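/*
 * Per-CPU copy of the current task's user-mode stack pointer
 * (thread.usersp); kept in sync in __switch_to() and on exec.
 */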
__visible DEFINE_PER_CPU(unsigned long, old_rsp);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
		regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
		regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
		regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
		regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
		regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
		regs->r13, regs->r14, regs->r15);

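	/* The segment selectors below are not saved in pt_regs; read them live. */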
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

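	/* Likewise, the FS/GS base addresses live in MSRs, not in pt_regs. */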
	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
		fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
		es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
		cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.ldt->size);
			BUG();
		}
	}
}

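/*
 * Install a 32-bit flat descriptor (4 GB limit, page granular) for
 * @addr in the given TLS slot of @t.
 */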
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

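/* Return the base address currently programmed into the given TLS slot. */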
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	p->thread.usersp = me->thread.usersp;
	set_tsk_thread_flag(p, TIF_FORK);
	p->thread.io_bitmap_ptr = NULL;

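	/* The child starts with a copy of the parent's segment selectors and bases. */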
	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)childregs;
		childregs->ss = __KERNEL_DS;
		childregs->bx = sp; /* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		return 0;
	}
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

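	/* If the parent has an I/O permission bitmap, give the child its own copy. */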
	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

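/*
 * Common setup for starting a new user-mode thread: reset the data
 * segment registers and set up the register frame for the new image.
 */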
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp = new_sp;
	regs->ip = new_ip;
	regs->sp = new_sp;
	this_cpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif


/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/* Reload esp0 and ss1. */
	load_sp0(tss, next);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be
	 * done before math_state_restore, so the TS bit is up to date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl. Those bases
	 * only differ from the values in the GDT or LDT if the selector
	 * is 0.
	 *
	 * Loading the segment register resets the hidden base part of
	 * the register to 0 or the value from the GDT / LDT. If the
	 * next base address is zero, writing 0 to the segment register
	 * is much faster than using wrmsr to explicitly zero the base.
	 *
	 * The thread_struct.fs and thread_struct.gs values are 0
	 * if the fs and gs bases respectively are not overridden
	 * from the values implied by fsindex and gsindex. They
	 * are nonzero, and store the nonzero base addresses, if
	 * the bases are overridden.
	 *
	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
	 * be impossible.
	 *
	 * Therefore we need to reload the segment registers if either
	 * the old or new selector is nonzero, and we need to override
	 * the base address if the next thread expects it to be overridden.
	 *
	 * This code is unnecessarily slow in the case where the old and
	 * new indexes are zero and the new base is nonzero -- it will
	 * unnecessarily write 0 to the selector before writing the new
	 * base address.
	 *
	 * Note: This all depends on arch_prctl being the only way that
	 * user code can override the segment base. Once wrfsbase and
	 * wrgsbase are enabled, most of this code will need to change.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);

		/*
		 * If user code wrote a nonzero value to FS, then it also
		 * cleared the overridden base address.
		 *
		 * XXX: if user code wrote 0 to FS and cleared the base
		 * address itself, we won't notice and we'll incorrectly
		 * restore the prior base address next time we reschedule
		 * the process.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);

		/* This works (and fails) the same way as fsindex above. */
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the per-CPU thread state: the saved user stack pointer
	 * and the current_task pointer.
	 */
	prev->usersp = this_cpu_read(old_rsp);
	this_cpu_write(old_rsp, next->usersp);
	this_cpu_write(current_task, next_p);

	/*
	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
	 * preempt_count of all tasks was equal here and this would not be
	 * needed.
	 */
	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

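	/*
	 * Point the per-CPU kernel_stack at the top of the next task's
	 * stack, for use by the entry code.
	 */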
	this_cpu_write(kernel_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(xen_pv_domain() &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* is_compat_task() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		return 0;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start + sizeof(thread_info)
	 * thread_info
	 * ----------- start
	 *
	 * The task's stack pointer points at the location where the
	 * frame pointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start + sizeof(struct thread_info);

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		return 0;

	fp = READ_ONCE(*(unsigned long *)sp);
	do {
		if (fp < bottom || fp > top)
			return 0;
		ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip))
			return ip;
		fp = READ_ONCE(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);
	return 0;
}

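/*
 * arch_prctl() backend: get or set the FS/GS base for @task. Bases
 * below 4 GB are handled via a GDT TLS slot (cheaper to switch);
 * larger bases are written to the FS/GS base MSRs.
 */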
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
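		/*
		 * If this is the current task and a GS selector is loaded,
		 * read the live base from the MSR instead of the saved
		 * thread.gs value.
		 */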
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

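/*
 * User-mode stack pointer of @task: compat (IA-32) tasks keep it in
 * pt_regs, 64-bit tasks in thread.usersp.
 */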
unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
		(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}