/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN	0
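
/*
 * Illustrative sketch (editorial addition, not part of this file): network
 * drivers typically reserve NET_IP_ALIGN bytes of headroom so that the IP
 * header ends up naturally aligned; with the value 0 above, the
 * skb_reserve() below compiles away on arm64:
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 */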

#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0
#define MTE_CTRL_GCR_USER_EXCL_MASK	0xffff

#define MTE_CTRL_TCF_SYNC		(1UL << 16)
#define MTE_CTRL_TCF_ASYNC		(1UL << 17)
#define MTE_CTRL_TCF_ASYMM		(1UL << 18)
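
/*
 * Illustrative sketch (editorial addition): these bits cache a task's MTE
 * tag-check-fault mode and the GCR_EL1 exclusion mask derived from the
 * prctl() arguments, e.g. (per the MTE ABI documentation):
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0);
 */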

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <linux/android_vendor.h>

#include <vdso/processor.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/spectre.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64		(UL(1) << vabits_actual)
#define TASK_SIZE_MAX		(UL(1) << VA_BITS)

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32		UL(0x100000000)
#else
#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW	(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#define DEFAULT_MAP_WINDOW	DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX		DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr, len, flags) \
		(((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
					base + TASK_SIZE - DEFAULT_MAP_WINDOW : \
					base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
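
/*
 * Illustrative sketch (editorial addition): on a 52-bit VA kernel these
 * hooks keep mmap() allocations below DEFAULT_MAP_WINDOW unless userspace
 * opts in by passing a hint address above it, e.g.:
 *
 *	void *p = mmap((void *)(1UL << 50), size, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */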

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};

enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_SME,
	ARM64_VEC_MAX,
};

enum fp_type {
	FP_STATE_CURRENT,	/* Save based on current task state. */
	FP_STATE_FPSIMD,
	FP_STATE_SVE,
};

struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;
		u64		fpmr;
		unsigned long	pad;
		struct user_fpsimd_state fpsimd_state;
	} uw;

	enum fp_type		fp_type;	/* registers FPSIMD or SVE? */
	unsigned int		fpsimd_cpu;
	void			*sve_state;	/* SVE registers, if any */
	void			*sme_state;	/* ZA and ZT state, if any */
	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */

	struct user_fpsimd_state	kernel_fpsimd_state;
	unsigned int			kernel_fpsimd_cpu;
#ifdef CONFIG_ARM64_PTR_AUTH
	struct ptrauth_keys_user	keys_user;
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
	struct ptrauth_keys_kernel	keys_kernel;
#endif
#endif
#ifdef CONFIG_ARM64_MTE
	u64			mte_ctrl;
#endif
	u64			sctlr_user;
	u64			svcr;
	u64			tpidr2_el0;
	u64			por_el0;
	ANDROID_VENDOR_DATA(1);
};

static inline unsigned int thread_get_vl(struct thread_struct *thread,
					 enum vec_type type)
{
	return thread->vl[type];
}

static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
{
	return thread_get_vl(thread, ARM64_VEC_SVE);
}

static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
{
	return thread_get_vl(thread, ARM64_VEC_SME);
}

static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
{
	if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
		return thread_get_sme_vl(thread);
	else
		return thread_get_sve_vl(thread);
}

unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned long vl);
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned long vl);
unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type);

static inline unsigned int task_get_sve_vl(const struct task_struct *task)
{
	return task_get_vl(task, ARM64_VEC_SVE);
}

static inline unsigned int task_get_sme_vl(const struct task_struct *task)
{
	return task_get_vl(task, ARM64_VEC_SME);
}

static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
	task_set_vl(task, ARM64_VEC_SVE, vl);
}

static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
{
	return task_get_vl_onexec(task, ARM64_VEC_SVE);
}

static inline void task_set_sve_vl_onexec(struct task_struct *task,
					  unsigned long vl)
{
	task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
}

#define SCTLR_USER_MASK							\
	(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
	 SCTLR_EL1_TCF0_MASK)

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpmr) +
		     sizeof_field(struct thread_struct, uw.pad) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}
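
/*
 * Illustrative note (editorial addition): with CONFIG_HARDENED_USERCOPY,
 * the offset/size pair reported above means a copy confined to uw, e.g.
 *
 *	copy_to_user(ubuf, &task->thread.uw.fpsimd_state,
 *		     sizeof(task->thread.uw.fpsimd_state));
 *
 * passes the object-size checks, while a copy straddling other
 * thread_struct fields would be rejected.
 */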

#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	s32 previous_syscall = regs->syscallno;
	memset(regs, 0, sizeof(*regs));
	regs->syscallno = previous_syscall;
	regs->pc = pc;

	if (system_uses_irq_prio_masking())
		regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	spectre_v4_enable_task_mitigation(current);
	regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)
		regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	spectre_v4_enable_task_mitigation(current);
	regs->compat_sp = sp;
}
#endif

static __always_inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static __always_inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
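
/*
 * Illustrative sketch (editorial addition): fault-handling paths use these
 * helpers to classify a faulting address by translation regime; the handler
 * names below are hypothetical:
 *
 *	if (is_ttbr0_addr(addr))
 *		handle_user_fault(addr);	user mapping (TTBR0_EL1)
 *	else if (is_ttbr1_addr(addr))
 *		handle_kernel_fault(addr);	kernel mapping (TTBR1_EL1)
 */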

/* Forward declaration, a strange C thing */
struct task_struct;

unsigned long __get_wchan(struct task_struct *p);

void update_sctlr_el1(u64 sctlr);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}
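
/*
 * Illustrative sketch (editorial addition): a linked-list walk can hide
 * memory latency by prefetching the next node before processing the
 * current one (process() is hypothetical):
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 */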

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
#define SME_SET_VL(arg)	sme_set_current_vl(arg)
#define SME_GET_VL()	sme_get_current_vl()
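
/*
 * Illustrative sketch (editorial addition): userspace reaches these
 * helpers via prctl(), e.g. requesting a 256-bit (32-byte) SVE vector
 * length for the current thread:
 *
 *	prctl(PR_SVE_SET_VL, 32);
 *
 * The return value on success encodes the vector length actually set,
 * which may be smaller than requested.
 */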

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)

/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled)				\
	ptrauth_set_enabled_keys(tsk, keys, enabled)
#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl(current)
#endif

int get_tsc_mode(unsigned long adr);
int set_tsc_mode(unsigned int val);
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */