/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

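/*
 * The top of the user stack depends on the task's personality: 32-bit
 * address-space tasks get the full TASK_SIZE, legacy 26-bit tasks are
 * limited to TASK_SIZE_26.
 */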
#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

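/*
 * Per-thread hardware breakpoint state, only present when the
 * hw_breakpoint framework is configured.
 */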
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

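/*
 * Architecture-specific per-thread state: details of the most recent
 * fault and any hardware breakpoints installed for the thread.
 */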
struct thread_struct {
							/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
							/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  {	}

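/*
 * Set up pt_regs for a freshly exec'd user program: the user register
 * set is cleared, CPSR selects 26- or 32-bit user mode plus the Thumb
 * and endian state, and pc/sp are loaded.  For FDPIC binaries
 * (FDPIC_FUNCPTRS personality) r7-r9 survive the clear and r10 is
 * pointed at the start of the data segment.
 */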
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

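/*
 * On ARMv6, and on Cortex-A9 parts affected by erratum 754327 (no
 * automatic store buffer drain), a polling loop can keep memory
 * updates from becoming visible, so cpu_relax() must include a full
 * memory barrier (padded with nops).  Elsewhere a compiler barrier
 * is enough.
 */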
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()						\
	do {							\
		smp_mb();					\
		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;");	\
	} while (0)
#else
#define cpu_relax()			barrier()
#endif

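/*
 * The saved user-mode registers sit at the top of the task's kernel
 * stack, immediately below THREAD_START_SP.
 */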
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

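/*
 * Emit the SMP form of an instruction and record its address in the
 * .alt.smp.init section so it can be patched to the UP form when an
 * SMP kernel boots on a uniprocessor machine.
 */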
#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - only available on ARMv5 and later.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
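/* "pld" is only a hint and cannot fault, so any address may be passed. */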
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

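/*
 * "pldw" (prefetch for write) is part of the ARMv7 multiprocessing
 * extensions; when an SMP kernel is patched for uniprocessor
 * operation the alternative falls back to a plain "pld".
 */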
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif	/* __LINUX_ARM_ARCH__ >= 7 && CONFIG_SMP */
#endif	/* __LINUX_ARM_ARCH__ >= 5 */

#endif	/* __KERNEL__ */

#endif /* __ASM_ARM_PROCESSOR_H */