/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H

#include <linux/compiler.h>
#include <asm/thread_info.h>

#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
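
@ The switch_tls_* macros are used at context-switch time (by __switch_to,
@ see entry-armv.S).  They all take the same arguments:
@   base       - thread_info pointer used to save the outgoing user r/w
@                register value at TI_TP_VALUE + 4
@   tp         - new value for TPIDRURO (user read-only TLS register)
@   tpuser     - new value for TPIDRURW (user read/write register)
@   tmp1, tmp2 - scratch registers
@
@ switch_tls_none is the CONFIG_TLS_REG_EMUL variant: the TLS register is
@ trapped and emulated, so there is nothing to switch here.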
.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
.endm

.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
        mrc     p15, 0, \tmp2, c13, c0, 2       @ get the user r/w register
        mcr     p15, 0, \tp, c13, c0, 3         @ set TLS register
        mcr     p15, 0, \tpuser, c13, c0, 2     @ and the user r/w register
        str     \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm

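@ Plain ARMv6 may or may not implement the TLS registers, so the v6
@ variant checks HWCAP_TLS at run time: without hardware TLS the value is
@ stored in the kuser word at 0xffff0ff0, otherwise the sequence is the
@ same as the V6K variant above.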
.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
        ldr     \tmp1, =elf_hwcap
        ldr     \tmp1, [\tmp1, #0]
        mov     \tmp2, #0xffff0fff
        tst     \tmp1, #HWCAP_TLS               @ hardware TLS available?
        streq   \tp, [\tmp2, #-15]              @ set TLS value at 0xffff0ff0
        mrcne   p15, 0, \tmp2, c13, c0, 2       @ get the user r/w register
        mcrne   p15, 0, \tp, c13, c0, 3         @ yes, set TLS register
        mcrne   p15, 0, \tpuser, c13, c0, 2     @ set user r/w register
        strne   \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm

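@ Software-only fallback: no TLS registers exist, so the value lives
@ solely in the vector-page word at 0xffff0ff0 (see the kuser helpers in
@ entry-armv.S).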
.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
        mov     \tmp1, #0xffff0fff
        str     \tp, [\tmp1, #-15]              @ set TLS value at 0xffff0ff0
.endm
#endif

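/*
 * Compile-time selection of the TLS handling mode:
 *   tls_emu     - the TLS register is trapped and emulated
 *   has_tls_reg - the TPIDRURO/TPIDRURW registers are (or may be) present
 *   switch_tls  - the assembly macro used by the context-switch code
 */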
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu		1
#define has_tls_reg		1
#define switch_tls	switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu		0
#define has_tls_reg		(elf_hwcap & HWCAP_TLS)
#define switch_tls	switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu		0
#define has_tls_reg		1
#define switch_tls	switch_tls_v6k
#else
#define tls_emu		0
#define has_tls_reg		0
#define switch_tls	switch_tls_software
#endif

#ifndef __ASSEMBLY__

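/*
 * Update the shadow TLS value in thread_info and, where the hardware
 * allows it, the TPIDRURO register itself.  Called with preemption
 * enabled, e.g. from the set_tls syscall handler.
 */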
static inline void set_tls(unsigned long val)
{
        struct thread_info *thread;

        thread = current_thread_info();

        thread->tp_value[0] = val;

        /*
         * This code runs with preemption enabled and therefore must
         * be reentrant with respect to switch_tls.
         *
         * We need to ensure ordering between the shadow state and the
         * hardware state, so that we don't corrupt the hardware state
         * with a stale shadow state during context switch.
         *
         * If we're preempted here, switch_tls will load TPIDRURO from
         * thread_info upon resuming execution and the following mcr
         * is merely redundant.
         */
        barrier();

        if (!tls_emu) {
                if (has_tls_reg) {
                        asm("mcr p15, 0, %0, c13, c0, 3"
                            : : "r" (val));
                } else {
#ifdef CONFIG_KUSER_HELPERS
                        /*
                         * User space must never try to access this
                         * directly.  Expect your app to break
                         * eventually if you do so.  The user helper
                         * at 0xffff0fe0 must be used instead.  (see
                         * entry-armv.S for details)
                         */
                        *((unsigned int *)0xffff0ff0) = val;
#endif
                }

        }
}

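/*
 * Read the current TPIDRURW (user read/write) register; returns 0 when
 * the register is absent or emulated.
 */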
static inline unsigned long get_tpuser(void)
{
        unsigned long reg = 0;

        if (has_tls_reg && !tls_emu)
                __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));

        return reg;
}

static inline void set_tpuser(unsigned long val)
{
        /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
         * we need not update thread_info.
         */
        if (has_tls_reg && !tls_emu) {
                asm("mcr p15, 0, %0, c13, c0, 2"
                    : : "r" (val));
        }
}

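/*
 * Reset both TLS values to zero, typically when a task execs, so that
 * stale TLS state is not carried over into the new program.
 */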
static inline void flush_tls(void)
{
        set_tls(0);
        set_tpuser(0);
}

#endif
#endif	/* __ASMARM_TLS_H */