// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <trace/hooks/mpam.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
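
/*
 * Note: with CONFIG_STACKPROTECTOR_PER_TASK the canary lives in each
 * task_struct instead (on arm64 the compiler loads it via the
 * sysreg-based guard, relative to sp_el0), so the global guard above
 * is only needed for the non-per-task configuration.
 */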
67
68 /*
69 * Function pointers to optional machine specific functions
70 */
71 void (*pm_power_off)(void);
72 EXPORT_SYMBOL_GPL(pm_power_off);
73
__cpu_do_idle(void)74 static void noinstr __cpu_do_idle(void)
75 {
76 dsb(sy);
77 wfi();
78 }
79
__cpu_do_idle_irqprio(void)80 static void noinstr __cpu_do_idle_irqprio(void)
81 {
82 unsigned long pmr;
83 unsigned long daif_bits;
84
85 daif_bits = read_sysreg(daif);
86 write_sysreg(daif_bits | PSR_I_BIT, daif);
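
	/*
	 * PSTATE.I is set above so that, once PMR is unmasked below, a
	 * pending interrupt acts only as a wake-up event for WFI and is
	 * not actually taken until DAIF is restored on exit from idle.
	 */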

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void noinstr cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
	raw_local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr
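
/*
 * Illustrative mapping: PSR_BTYPE_MASK covers the two BTYPE bits of
 * pstate, so the field indexes this table directly:
 * 0b00 -> "--", 0b01 -> "jc", 0b10 -> "-c", 0b11 -> "j-".
 */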

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & PSR_AA32_N_BIT ? 'N' : 'n',
			pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
			pstate & PSR_AA32_C_BIT ? 'C' : 'c',
			pstate & PSR_AA32_V_BIT ? 'V' : 'v',
			pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
			pstate & PSR_AA32_T_BIT ? "T32" : "A32",
			pstate & PSR_AA32_E_BIT ? "BE" : "LE",
			pstate & PSR_AA32_A_BIT ? 'A' : 'a',
			pstate & PSR_AA32_I_BIT ? 'I' : 'i',
			pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-',
			pstate & PSR_TCO_BIT ? '+' : '-',
			btype_str);
	}
}
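
/*
 * Example output for a 64-bit EL0 task with only PSTATE.N set
 * (illustrative):
 *
 *	pstate: 80000000 (Nzcv daif -PAN -UAO -TCO BTYPE=--)
 */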

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}
EXPORT_SYMBOL_GPL(show_regs);

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's sve_state
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF_SVE and sve_state in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;

		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
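
		/*
		 * For a kernel thread, stack_start and stk_sz do not
		 * describe a stack at all: they carry the thread function
		 * and its argument, which ret_from_fork invokes as
		 * x19(x20).
		 */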
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

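	/*
	 * tpidrro_el0 is only zeroed for native 64-bit tasks when KPTI is
	 * disabled: with KPTI the exception entry trampoline uses
	 * tpidrro_el0 as a scratch register, so any stale compat TLS value
	 * is clobbered on the way back to userspace anyway.
	 */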
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpus_have_const_cap(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Ensure access is disabled when switching to a 32bit task, ensure
 * access is enabled when switching to a 64bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
		return;

	if (is_compat_thread(task_thread_info(next)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void erratum_1418040_new_exec(void)
{
	preempt_disable();
	erratum_1418040_thread_switch(current);
	preempt_enable();
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(next);
	ptrauth_thread_switch_user(next);
	/*
	 * The vendor hook must run before the dsb() below because MPAM is
	 * related to cache maintenance.
	 */
	trace_android_vh_mpam_set(prev, next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
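
/*
 * Illustrative: with 4KiB pages, "get_random_int() & ~PAGE_MASK" yields
 * a random offset in [0, 4095]; the final "& ~0xf" keeps sp 16-byte
 * aligned, as AArch64 requires for the stack pointer.
 */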

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	erratum_1418040_new_exec();
	mte_thread_init_user();

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
			| PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}
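
/*
 * Minimal userspace sketch (illustrative, error handling elided) of
 * opting in to the tagged address ABI:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * On success, syscalls accept user pointers with a non-zero top byte
 * from this task.
 */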

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
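
/*
 * The control above is exposed as /proc/sys/abi/tagged_addr_disabled;
 * writing 1 to it prevents further tasks from enabling the tagged
 * address ABI via prctl(), without affecting tasks that already
 * opted in.
 */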
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif