/arch/x86/kernel/
espfix_64.c
  in init_espfix_ap():
    148  void *stack_page;   (local)
    160  stack_page = ACCESS_ONCE(espfix_pages[page]);
    161  if (likely(stack_page))
    167  stack_page = ACCESS_ONCE(espfix_pages[page]);
    168  if (stack_page)
    194  stack_page = (void *)__get_free_page(GFP_KERNEL);
    195  pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
    200  ACCESS_ONCE(espfix_pages[page]) = stack_page;
    206  this_cpu_write(espfix_waddr, (unsigned long)stack_page
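Taken together, the espfix_64.c hits outline a lock-free fast path plus a locked slow path for populating a shared espfix stack page: read the slot with ACCESS_ONCE(), and only if it is still empty take a lock, re-check, allocate, and publish. Below is a minimal sketch of that pattern; the mutex name, the goto labels, the function name, and the array size are assumptions filled in around the quoted lines, not part of the listing.

/* Sketch of the double-checked allocation seen in init_espfix_ap().
 * espfix_init_mutex, the labels, and init_espfix_page() are illustrative. */
static void *espfix_pages[1024];              /* size illustrative */
static DEFINE_MUTEX(espfix_init_mutex);       /* name assumed */

static void init_espfix_page(unsigned int page)
{
	void *stack_page;

	/* Fast path: another CPU may already have installed this page. */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Re-check under the lock before allocating. */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock;

	/* Allocate the backing page, map it read-only, then publish it. */
	stack_page = (void *)__get_free_page(GFP_KERNEL);
	/* ... install __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)) ... */
	ACCESS_ONCE(espfix_pages[page]) = stack_page;
unlock:
	mutex_unlock(&espfix_init_mutex);
done:
	/* Record this CPU's writable alias of the espfix stack. */
	this_cpu_write(espfix_waddr, (unsigned long)stack_page);
}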
process_32.c
  in get_wchan():
    324  unsigned long stack_page;   (local)
    328  stack_page = (unsigned long)task_stack_page(p);
    330  if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
    335  if (bp < stack_page || bp > top_ebp+stack_page)
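The process_32.c lines are the bounds checks get_wchan() performs before dereferencing a sleeping task's saved stack pointer and frame pointer. Most of the other get_wchan() entries in this listing follow the same shape. A hedged sketch of the surrounding frame-pointer walk follows; only the two range checks are quoted from the listing, while top_esp/top_ebp, the loop body, and the function name are reconstructed for illustration.

/* Sketch of a frame-pointer wchan walk; 32-bit x86 stack layout assumed. */
unsigned long get_wchan_sketch(struct task_struct *p)
{
	unsigned long bp, sp, ip, stack_page;
	unsigned long top_esp = THREAD_SIZE - sizeof(unsigned long);
	unsigned long top_ebp = THREAD_SIZE - 2 * sizeof(unsigned long);
	int count = 0;

	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp + stack_page)
		return 0;

	bp = *(unsigned long *)sp;          /* frame pointer saved by switch_to() */
	do {
		if (bp < stack_page || bp > top_ebp + stack_page)
			return 0;
		ip = *(unsigned long *)(bp + sizeof(unsigned long));
		if (!in_sched_functions(ip))
			return ip;          /* first return address outside the scheduler */
		bp = *(unsigned long *)bp;  /* follow the frame-pointer chain */
	} while (count++ < 16);
	return 0;
}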
/arch/cris/arch-v10/kernel/
process.c
  in get_wchan():
    152  unsigned long stack_page;
    156  stack_page = (unsigned long)p;
    158  if (!stack_page || esp < stack_page || esp > 8188+stack_page)
    163  if (ebp < stack_page || ebp > 8184+stack_page)
/arch/mips/kernel/
perf_event.c
  in perf_callchain_kernel():
    53   unsigned long stack_page =   (local)
    55   if (stack_page && sp >= stack_page &&
    56       sp <= stack_page + THREAD_SIZE - 32)
stacktrace.c
  in save_context_stack():
    42   unsigned long stack_page =   (local)
    44   if (stack_page && sp >= stack_page &&
    45       sp <= stack_page + THREAD_SIZE - 32)
process.c
  in unwind_stack_by_address():
    449  unsigned long notrace unwind_stack_by_address(unsigned long stack_page,   (argument)
    460  if (!stack_page)
    470  if (*sp >= stack_page &&
    471      *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
    499  if (*sp < stack_page ||
    500      *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
  in unwind_stack():
    524  unsigned long stack_page = (unsigned long)task_stack_page(task);   (local)
    525  return unwind_stack_by_address(stack_page, sp, pc, ra);
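The MIPS unwinder takes the stack base as an explicit argument and validates every candidate pointer against it before dereferencing, keeping 32 bytes in reserve at the top of the stack; unwind_stack() at lines 524-525 is just a thin wrapper that fetches task_stack_page() and delegates. A tiny sketch of that validation predicate, with the helper name invented for illustration (the kernel open-codes the test inside unwind_stack_by_address()):

/* Illustrative helper: does [sp, sp + need) lie inside the task stack,
 * leaving the top 32 bytes untouched? */
static bool sp_fits_in_stack(unsigned long stack_page, unsigned long sp,
			     unsigned long need)
{
	return stack_page != 0 &&
	       sp >= stack_page &&
	       sp + need <= stack_page + THREAD_SIZE - 32;
}

The same THREAD_SIZE - 32 bound appears in perf_event.c and stacktrace.c above, so all three MIPS call-chain walkers agree on where a valid stack ends.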
/arch/h8300/kernel/
process.c
  in get_wchan():
    137  unsigned long stack_page;   (local)
    142  stack_page = (unsigned long)p;
    145  if (fp < stack_page+sizeof(struct thread_info) ||
    146      fp >= 8184+stack_page)
/arch/hexagon/kernel/
process.c
  in get_wchan():
    163  unsigned long stack_page;   (local)
    168  stack_page = (unsigned long)task_stack_page(p);
    171  if (fp < (stack_page + sizeof(struct thread_info)) ||
    172      fp >= (THREAD_SIZE - 8 + stack_page))
/arch/avr32/kernel/
process.c
  in get_wchan():
    320  unsigned long stack_page;   (local)
    325  stack_page = (unsigned long)task_stack_page(p);
    326  BUG_ON(!stack_page);
    336  BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
    349  BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
/arch/m68k/kernel/
process.c
  in get_wchan():
    260  unsigned long stack_page;   (local)
    265  stack_page = (unsigned long)task_stack_page(p);
    268  if (fp < stack_page+sizeof(struct thread_info) ||
    269      fp >= 8184+stack_page)
/arch/arm/kvm/
arm.c
  in cpu_init_hyp_mode():
    807  unsigned long stack_page;   (local)
    815  stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
    816  hyp_stack_ptr = stack_page + PAGE_SIZE;
  in init_hyp_mode():
    891  unsigned long stack_page;   (local)
    893  stack_page = __get_free_page(GFP_KERNEL);
    894  if (!stack_page) {
    899  per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
    915  char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);   (local)
    916  err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
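For KVM on ARM, init_hyp_mode() allocates one HYP-mode stack page per CPU and maps it into the HYP page tables, while cpu_init_hyp_mode() later points the HYP stack pointer at the top of that page (stack_page + PAGE_SIZE, since the stack grows down). A condensed sketch of the allocation and mapping loops; the function name and the error handling around the quoted lines are assumptions.

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/* Sketch: allocate and map one HYP stack page per possible CPU. */
static int hyp_stack_setup_sketch(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		unsigned long stack_page = __get_free_page(GFP_KERNEL);

		if (!stack_page)
			return -ENOMEM;
		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

		/* Make the page reachable from the HYP mode mappings. */
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
		if (err)
			return err;
	}
	return 0;
}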
/arch/um/kernel/
process.c
  in get_wchan():
    388  unsigned long stack_page, sp, ip;   (local)
    394  stack_page = (unsigned long) task_stack_page(p);
    396  if (stack_page == 0)
    404  if (sp < stack_page)
    407  while (sp < stack_page + THREAD_SIZE) {
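Unlike the frame-pointer walks above, UML's get_wchan() bounds a word-by-word scan of the kernel stack with stack_page and THREAD_SIZE. The sketch below fills in a plausible loop body (look for a kernel text address once past the scheduler's frames); only the bounds checks are quoted from the listing, everything else, including the accessor for the saved stack pointer, is assumed.

/* Sketch of a linear wchan scan; the loop body is an assumption. */
unsigned long get_wchan_scan_sketch(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	stack_page = (unsigned long)task_stack_page(p);
	if (stack_page == 0)
		return 0;

	sp = saved_sp_of(p);                  /* hypothetical accessor */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *(unsigned long *)sp;
		if (in_sched_functions(ip))
			seen_sched = true;    /* still inside scheduler frames */
		else if (kernel_text_address(ip) && seen_sched)
			return ip;            /* first non-scheduler return address */
		sp += sizeof(unsigned long);
	}
	return 0;
}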
/arch/arm64/kernel/
process.c
  in get_wchan():
    390  unsigned long stack_page;   (local)
    398  stack_page = (unsigned long)task_stack_page(p);
    400  if (frame.sp < stack_page ||
    401      frame.sp >= stack_page + THREAD_SIZE ||
/arch/xtensa/kernel/
process.c
  in get_wchan():
    287  unsigned long stack_page = (unsigned long) task_stack_page(p);   (local)
    297  if (sp < stack_page + sizeof(struct task_struct) ||
    298      sp >= (stack_page + THREAD_SIZE) ||
/arch/mips/include/asm/
stacktrace.h
    10   extern unsigned long unwind_stack_by_address(unsigned long stack_page,
/arch/blackfin/kernel/
process.c
  in get_wchan():
    146  unsigned long stack_page;   (local)
    151  stack_page = (unsigned long)p;
    154  if (fp < stack_page + sizeof(struct thread_info) ||
    155      fp >= 8184 + stack_page)
/arch/powerpc/kernel/
process.c
  in valid_irq_stack():
    1238  unsigned long stack_page;   (local)
    1246  stack_page = (unsigned long) hardirq_ctx[cpu];
    1247  if (sp >= stack_page + sizeof(struct thread_struct)
    1248      && sp <= stack_page + THREAD_SIZE - nbytes)
    1251  stack_page = (unsigned long) softirq_ctx[cpu];
    1252  if (sp >= stack_page + sizeof(struct thread_struct)
    1253      && sp <= stack_page + THREAD_SIZE - nbytes)
  in validate_sp():
    1262  unsigned long stack_page = (unsigned long)task_stack_page(p);   (local)
    1264  if (sp >= stack_page + sizeof(struct thread_struct)
    1265      && sp <= stack_page + THREAD_SIZE - nbytes)
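powerpc centralizes the "is this stack pointer plausible?" test in validate_sp(): accept a pointer that sits inside the task's own stack page with at least nbytes of room below the top, and otherwise fall back to valid_irq_stack(), which applies the same window to the per-CPU hard- and soft-IRQ stacks. A sketch of how the two fit together; only the range comparisons come from the listing, the function names are suffixed and the glue (return values, CPU lookup) is assumed.

/* Sketch: sp must leave room for nbytes below the top of some known stack. */
static int valid_irq_stack_sketch(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);      /* CPU lookup assumed */

	stack_page = (unsigned long)hardirq_ctx[cpu];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)softirq_ctx[cpu];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return 0;
}

static int validate_sp_sketch(unsigned long sp, struct task_struct *p,
			      unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	/* Not on the task stack: it may be on an IRQ stack instead. */
	return valid_irq_stack_sketch(sp, p, nbytes);
}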