1 /*
2  * Contains CPU feature definitions
3  *
4  * Copyright (C) 2015 ARM Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #define pr_fmt(fmt) "CPU features: " fmt
20 
21 #include <linux/bsearch.h>
22 #include <linux/cpumask.h>
23 #include <linux/sort.h>
24 #include <linux/stop_machine.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/cpu.h>
28 #include <asm/cpu.h>
29 #include <asm/cpufeature.h>
30 #include <asm/cpu_ops.h>
31 #include <asm/mmu_context.h>
32 #include <asm/processor.h>
33 #include <asm/sysreg.h>
34 #include <asm/traps.h>
35 #include <asm/virt.h>
36 
37 unsigned long elf_hwcap __read_mostly;
38 EXPORT_SYMBOL_GPL(elf_hwcap);
39 
40 #ifdef CONFIG_COMPAT
41 #define COMPAT_ELF_HWCAP_DEFAULT	\
42 				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
43 				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
44 				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
45 				 COMPAT_HWCAP_LPAE)
46 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
47 unsigned int compat_elf_hwcap2 __read_mostly;
48 #endif
49 
50 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
51 EXPORT_SYMBOL(cpu_hwcaps);
52 
53 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
54 {
55 	/* file-wide pr_fmt adds "CPU features: " prefix */
56 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
57 	return 0;
58 }
59 
60 static struct notifier_block cpu_hwcaps_notifier = {
61 	.notifier_call = dump_cpu_hwcaps
62 };
63 
64 static int __init register_cpu_hwcaps_dumper(void)
65 {
66 	atomic_notifier_chain_register(&panic_notifier_list,
67 				       &cpu_hwcaps_notifier);
68 	return 0;
69 }
70 __initcall(register_cpu_hwcaps_dumper);
71 
72 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
73 EXPORT_SYMBOL(cpu_hwcap_keys);
74 
75 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
76 	{						\
77 		.sign = SIGNED,				\
78 		.visible = VISIBLE,			\
79 		.strict = STRICT,			\
80 		.type = TYPE,				\
81 		.shift = SHIFT,				\
82 		.width = WIDTH,				\
83 		.safe_val = SAFE_VAL,			\
84 	}
85 
86 /* Define a feature with unsigned values */
87 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
88 	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
89 
90 /* Define a feature with a signed value */
91 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
92 	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
93 
94 #define ARM64_FTR_END					\
95 	{						\
96 		.width = 0,				\
97 	}
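
/*
 * For example, the ID_AA64ISAR0_EL1 AES entry below,
 *	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
 * roughly expands to an unsigned, user-visible, strictly checked
 * description of the 4-bit field at ID_AA64ISAR0_AES_SHIFT, whose
 * system-wide safe value is 0 (feature not implemented).
 */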
98 
99 /* meta feature for alternatives */
100 static bool __maybe_unused
101 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
102 
103 
104 /*
105  * NOTE: Any changes to the visibility of features should be kept in
106  * sync with the documentation of the CPU feature register ABI.
107  */
108 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
109 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0),
110 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
111 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
112 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
113 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
114 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
115 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
116 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
117 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
118 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
119 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
120 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
121 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
122 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
123 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
124 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
125 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
126 	ARM64_FTR_END,
127 };
128 
129 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
130 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
131 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
132 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
133 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
134 	ARM64_FTR_END,
135 };
136 
137 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
138 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
139 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
140 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
141 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
142 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
143 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
144 	/* Linux doesn't care about the EL3 */
145 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
146 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
147 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
148 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
149 	ARM64_FTR_END,
150 };
151 
152 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
153 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
154 	ARM64_FTR_END,
155 };
156 
157 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
158 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
159 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
160 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
161 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
162 	/* Linux shouldn't care about secure memory */
163 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
164 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
165 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
166 	/*
167 	 * Differing PARange is fine as long as all peripherals and memory are mapped
168 	 * within the minimum PARange of all CPUs
169 	 */
170 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
171 	ARM64_FTR_END,
172 };
173 
174 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
175 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
176 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
177 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
178 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
179 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
180 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
181 	ARM64_FTR_END,
182 };
183 
184 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
185 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
186 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
187 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
188 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
189 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
190 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
191 	ARM64_FTR_END,
192 };
193 
194 static const struct arm64_ftr_bits ftr_ctr[] = {
195 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
196 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),	/* DIC */
197 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
198 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),	/* CWG */
199 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),	/* ERG */
200 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
201 	/*
202 	 * Linux can handle differing I-cache policies. Userspace JITs will
203 	 * make use of *minLine.
204 	 * If we have differing I-cache policies, report it as the weakest - VIPT.
205 	 */
206 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
207 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
208 	ARM64_FTR_END,
209 };
210 
211 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
212 	.name		= "SYS_CTR_EL0",
213 	.ftr_bits	= ftr_ctr
214 };
215 
216 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
217 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
218 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
219 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
220 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
221 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
222 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
223 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
224 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
225 	ARM64_FTR_END,
226 };
227 
228 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
229 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
230 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
231 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
232 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
233 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
234 	/*
235 	 * We can instantiate multiple PMU instances with different levels
236 	 * of support.
237 	 */
238 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
239 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
240 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
241 	ARM64_FTR_END,
242 };
243 
244 static const struct arm64_ftr_bits ftr_mvfr2[] = {
245 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),		/* FPMisc */
246 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),		/* SIMDMisc */
247 	ARM64_FTR_END,
248 };
249 
250 static const struct arm64_ftr_bits ftr_dczid[] = {
251 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
252 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
253 	ARM64_FTR_END,
254 };
255 
256 
257 static const struct arm64_ftr_bits ftr_id_isar5[] = {
258 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
259 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
260 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
261 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
262 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
263 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
264 	ARM64_FTR_END,
265 };
266 
267 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
268 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
269 	ARM64_FTR_END,
270 };
271 
272 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
273 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),		/* State3 */
274 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),		/* State2 */
275 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),		/* State1 */
276 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),		/* State0 */
277 	ARM64_FTR_END,
278 };
279 
280 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
281 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
282 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
283 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
284 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
285 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
286 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
287 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
288 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
289 	ARM64_FTR_END,
290 };
291 
292 /*
293  * Common ftr bits for a 32bit register with all hidden, strict
294  * attributes, with 4bit feature fields and a default safe value of
295  * 0. Covers the following 32bit registers:
296  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
297  */
298 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
299 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
300 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
301 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
302 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
303 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
304 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
305 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
306 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
307 	ARM64_FTR_END,
308 };
309 
310 /* Table for a single 32bit feature value */
311 static const struct arm64_ftr_bits ftr_single32[] = {
312 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
313 	ARM64_FTR_END,
314 };
315 
316 static const struct arm64_ftr_bits ftr_raz[] = {
317 	ARM64_FTR_END,
318 };
319 
320 #define ARM64_FTR_REG(id, table) {		\
321 	.sys_id = id,				\
322 	.reg = 	&(struct arm64_ftr_reg){	\
323 		.name = #id,			\
324 		.ftr_bits = &((table)[0]),	\
325 	}}
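
/*
 * As an illustration, ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0)
 * is roughly equivalent to:
 *	{
 *		.sys_id = SYS_ID_AA64ISAR0_EL1,
 *		.reg = &(struct arm64_ftr_reg){
 *			.name = "SYS_ID_AA64ISAR0_EL1",
 *			.ftr_bits = &ftr_id_aa64isar0[0],
 *		},
 *	}
 */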
326 
327 static const struct __ftr_reg_entry {
328 	u32			sys_id;
329 	struct arm64_ftr_reg 	*reg;
330 } arm64_ftr_regs[] = {
331 
332 	/* Op1 = 0, CRn = 0, CRm = 1 */
333 	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
334 	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
335 	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
336 	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
337 	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
338 	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
339 	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
340 
341 	/* Op1 = 0, CRn = 0, CRm = 2 */
342 	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
343 	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
344 	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
345 	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
346 	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
347 	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
348 	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
349 
350 	/* Op1 = 0, CRn = 0, CRm = 3 */
351 	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
352 	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
353 	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
354 
355 	/* Op1 = 0, CRn = 0, CRm = 4 */
356 	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
357 	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
358 
359 	/* Op1 = 0, CRn = 0, CRm = 5 */
360 	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
361 	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
362 
363 	/* Op1 = 0, CRn = 0, CRm = 6 */
364 	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
365 	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
366 
367 	/* Op1 = 0, CRn = 0, CRm = 7 */
368 	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
369 	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
370 	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
371 
372 	/* Op1 = 3, CRn = 0, CRm = 0 */
373 	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
374 	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
375 
376 	/* Op1 = 3, CRn = 14, CRm = 0 */
377 	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
378 };
379 
380 static int search_cmp_ftr_reg(const void *id, const void *regp)
381 {
382 	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
383 }
384 
385 /*
386  * get_arm64_ftr_reg - Lookup a feature register entry using its
387  * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
388  * ascending order of sys_id, we use binary search to find a matching
389  * entry.
390  *
391  * returns - Upon success, matching ftr_reg entry for id.
392  *         - NULL on failure. It is up to the caller to decide
393  *	     the impact of a failure.
394  */
395 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
396 {
397 	const struct __ftr_reg_entry *ret;
398 
399 	ret = bsearch((const void *)(unsigned long)sys_id,
400 			arm64_ftr_regs,
401 			ARRAY_SIZE(arm64_ftr_regs),
402 			sizeof(arm64_ftr_regs[0]),
403 			search_cmp_ftr_reg);
404 	if (ret)
405 		return ret->reg;
406 	return NULL;
407 }
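
/*
 * e.g. get_arm64_ftr_reg(SYS_ID_AA64ISAR0_EL1) returns the arm64_ftr_reg
 * instance registered via ARM64_FTR_REG() above, while an id with no
 * entry in arm64_ftr_regs[] returns NULL.
 */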
408 
409 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
410 			       s64 ftr_val)
411 {
412 	u64 mask = arm64_ftr_mask(ftrp);
413 
414 	reg &= ~mask;
415 	reg |= (ftr_val << ftrp->shift) & mask;
416 	return reg;
417 }
418 
419 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
420 				s64 cur)
421 {
422 	s64 ret = 0;
423 
424 	switch (ftrp->type) {
425 	case FTR_EXACT:
426 		ret = ftrp->safe_val;
427 		break;
428 	case FTR_LOWER_SAFE:
429 		ret = new < cur ? new : cur;
430 		break;
431 	case FTR_HIGHER_OR_ZERO_SAFE:
432 		if (!cur || !new)
433 			break;
434 		/* Fallthrough */
435 	case FTR_HIGHER_SAFE:
436 		ret = new > cur ? new : cur;
437 		break;
438 	default:
439 		BUG();
440 	}
441 
442 	return ret;
443 }
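
/*
 * A rough worked example: for an FTR_LOWER_SAFE field such as
 * ID_AA64ISAR0_EL1.AES, a boot CPU value of 2 (PMULL) combined with a
 * secondary CPU value of 1 (AES only) gives a system-wide value of 1,
 * i.e. the lower of the two. For an FTR_EXACT field, any mismatch falls
 * back to ftrp->safe_val instead.
 */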
444 
445 static void __init sort_ftr_regs(void)
446 {
447 	int i;
448 
449 	/* Check that the array is sorted so that we can do the binary search */
450 	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
451 		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
452 }
453 
454 /*
455  * Initialise the CPU feature register from Boot CPU values.
456  * Also initialises the strict_mask for the register.
457  * Any bits that are not covered by an arm64_ftr_bits entry are considered
458  * RES0 for the system-wide value, and must strictly match.
459  */
460 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
461 {
462 	u64 val = 0;
463 	u64 strict_mask = ~0x0ULL;
464 	u64 user_mask = 0;
465 	u64 valid_mask = 0;
466 
467 	const struct arm64_ftr_bits *ftrp;
468 	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
469 
470 	BUG_ON(!reg);
471 
472 	for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
473 		u64 ftr_mask = arm64_ftr_mask(ftrp);
474 		s64 ftr_new = arm64_ftr_value(ftrp, new);
475 
476 		val = arm64_ftr_set_value(ftrp, val, ftr_new);
477 
478 		valid_mask |= ftr_mask;
479 		if (!ftrp->strict)
480 			strict_mask &= ~ftr_mask;
481 		if (ftrp->visible)
482 			user_mask |= ftr_mask;
483 		else
484 			reg->user_val = arm64_ftr_set_value(ftrp,
485 							    reg->user_val,
486 							    ftrp->safe_val);
487 	}
488 
489 	val &= valid_mask;
490 
491 	reg->sys_val = val;
492 	reg->strict_mask = strict_mask;
493 	reg->user_mask = user_mask;
494 }
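
/*
 * To illustrate: after init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, ...), the
 * FTR_NONSTRICT fields (e.g. CSV2/CSV3 above) are cleared from
 * strict_mask, the FTR_VISIBLE fields (e.g. FP, ASIMD, DIT) are set in
 * user_mask, and any bits with no arm64_ftr_bits entry are treated as
 * RES0 in sys_val but still require an exact match via strict_mask.
 */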
495 
496 extern const struct arm64_cpu_capabilities arm64_errata[];
497 static void __init setup_boot_cpu_capabilities(void);
498 
499 void __init init_cpu_features(struct cpuinfo_arm64 *info)
500 {
501 	/* Before we start using the tables, make sure they are sorted */
502 	sort_ftr_regs();
503 
504 	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
505 	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
506 	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
507 	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
508 	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
509 	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
510 	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
511 	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
512 	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
513 	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
514 	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
515 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
516 
517 	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
518 		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
519 		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
520 		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
521 		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
522 		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
523 		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
524 		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
525 		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
526 		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
527 		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
528 		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
529 		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
530 		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
531 		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
532 		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
533 		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
534 	}
535 
536 	/*
537 	 * Detect and enable early CPU capabilities based on the boot CPU,
538 	 * after we have initialised the CPU feature infrastructure.
539 	 */
540 	setup_boot_cpu_capabilities();
541 }
542 
543 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
544 {
545 	const struct arm64_ftr_bits *ftrp;
546 
547 	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
548 		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
549 		s64 ftr_new = arm64_ftr_value(ftrp, new);
550 
551 		if (ftr_cur == ftr_new)
552 			continue;
553 		/* Find a safe value */
554 		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
555 		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
556 	}
557 
558 }
559 
560 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
561 {
562 	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
563 
564 	BUG_ON(!regp);
565 	update_cpu_ftr_reg(regp, val);
566 	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
567 		return 0;
568 	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
569 			regp->name, boot, cpu, val);
570 	return 1;
571 }
572 
573 /*
574  * Update system wide CPU feature registers with the values from a
575  * non-boot CPU. Also performs SANITY checks to make sure that there
576  * aren't any insane variations from that of the boot CPU.
577  */
578 void update_cpu_features(int cpu,
579 			 struct cpuinfo_arm64 *info,
580 			 struct cpuinfo_arm64 *boot)
581 {
582 	int taint = 0;
583 
584 	/*
585 	 * The kernel can handle differing I-cache policies, but otherwise
586 	 * caches should look identical. Userspace JITs will make use of
587 	 * *minLine.
588 	 */
589 	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
590 				      info->reg_ctr, boot->reg_ctr);
591 
592 	/*
593 	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
594 	 * could result in too much or too little memory being zeroed if a
595 	 * process is preempted and migrated between CPUs.
596 	 */
597 	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
598 				      info->reg_dczid, boot->reg_dczid);
599 
600 	/* If different, timekeeping will be broken (especially with KVM) */
601 	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
602 				      info->reg_cntfrq, boot->reg_cntfrq);
603 
604 	/*
605 	 * The kernel uses self-hosted debug features and expects CPUs to
606 	 * support identical debug features. We presently need CTX_CMPs, WRPs,
607 	 * and BRPs to be identical.
608 	 * ID_AA64DFR1 is currently RES0.
609 	 */
610 	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
611 				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
612 	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
613 				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
614 	/*
615 	 * Even in big.LITTLE, processors should be identical instruction-set
616 	 * wise.
617 	 */
618 	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
619 				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
620 	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
621 				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
622 
623 	/*
624 	 * Differing PARange support is fine as long as all peripherals and
625 	 * memory are mapped within the minimum PARange of all CPUs.
626 	 * Linux should not care about secure memory.
627 	 */
628 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
629 				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
630 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
631 				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
632 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
633 				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
634 
635 	/*
636 	 * EL3 is not our concern.
637 	 */
638 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
639 				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
640 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
641 				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
642 
643 	/*
644 	 * If we have AArch32, we care about 32-bit features for compat.
645 	 * If the system doesn't support AArch32, don't update them.
646 	 */
647 	if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
648 		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
649 
650 		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
651 					info->reg_id_dfr0, boot->reg_id_dfr0);
652 		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
653 					info->reg_id_isar0, boot->reg_id_isar0);
654 		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
655 					info->reg_id_isar1, boot->reg_id_isar1);
656 		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
657 					info->reg_id_isar2, boot->reg_id_isar2);
658 		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
659 					info->reg_id_isar3, boot->reg_id_isar3);
660 		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
661 					info->reg_id_isar4, boot->reg_id_isar4);
662 		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
663 					info->reg_id_isar5, boot->reg_id_isar5);
664 
665 		/*
666 		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
667 		 * ACTLR formats could differ across CPUs and therefore would have to
668 		 * be trapped for virtualization anyway.
669 		 */
670 		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
671 					info->reg_id_mmfr0, boot->reg_id_mmfr0);
672 		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
673 					info->reg_id_mmfr1, boot->reg_id_mmfr1);
674 		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
675 					info->reg_id_mmfr2, boot->reg_id_mmfr2);
676 		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
677 					info->reg_id_mmfr3, boot->reg_id_mmfr3);
678 		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
679 					info->reg_id_pfr0, boot->reg_id_pfr0);
680 		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
681 					info->reg_id_pfr1, boot->reg_id_pfr1);
682 		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
683 					info->reg_mvfr0, boot->reg_mvfr0);
684 		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
685 					info->reg_mvfr1, boot->reg_mvfr1);
686 		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
687 					info->reg_mvfr2, boot->reg_mvfr2);
688 	}
689 
690 	/*
691 	 * Mismatched CPU features are a recipe for disaster. Don't even
692 	 * pretend to support them.
693 	 */
694 	if (taint) {
695 		pr_warn_once("Unsupported CPU feature variation detected.\n");
696 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
697 	}
698 }
699 
700 u64 read_sanitised_ftr_reg(u32 id)
701 {
702 	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
703 
704 	/* We shouldn't get a request for an unsupported register */
705 	BUG_ON(!regp);
706 	return regp->sys_val;
707 }
708 
709 #define read_sysreg_case(r)	\
710 	case r:		return read_sysreg_s(r)
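
/*
 * e.g. read_sysreg_case(SYS_CTR_EL0) expands to
 * "case SYS_CTR_EL0: return read_sysreg_s(SYS_CTR_EL0)".
 */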
711 
712 /*
713  * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
714  * Read the system register on the current CPU
715  */
716 static u64 __read_sysreg_by_encoding(u32 sys_id)
717 {
718 	switch (sys_id) {
719 	read_sysreg_case(SYS_ID_PFR0_EL1);
720 	read_sysreg_case(SYS_ID_PFR1_EL1);
721 	read_sysreg_case(SYS_ID_DFR0_EL1);
722 	read_sysreg_case(SYS_ID_MMFR0_EL1);
723 	read_sysreg_case(SYS_ID_MMFR1_EL1);
724 	read_sysreg_case(SYS_ID_MMFR2_EL1);
725 	read_sysreg_case(SYS_ID_MMFR3_EL1);
726 	read_sysreg_case(SYS_ID_ISAR0_EL1);
727 	read_sysreg_case(SYS_ID_ISAR1_EL1);
728 	read_sysreg_case(SYS_ID_ISAR2_EL1);
729 	read_sysreg_case(SYS_ID_ISAR3_EL1);
730 	read_sysreg_case(SYS_ID_ISAR4_EL1);
731 	read_sysreg_case(SYS_ID_ISAR5_EL1);
732 	read_sysreg_case(SYS_MVFR0_EL1);
733 	read_sysreg_case(SYS_MVFR1_EL1);
734 	read_sysreg_case(SYS_MVFR2_EL1);
735 
736 	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
737 	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
738 	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
739 	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
740 	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
741 	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
742 	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
743 	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
744 	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
745 
746 	read_sysreg_case(SYS_CNTFRQ_EL0);
747 	read_sysreg_case(SYS_CTR_EL0);
748 	read_sysreg_case(SYS_DCZID_EL0);
749 
750 	default:
751 		BUG();
752 		return 0;
753 	}
754 }
755 
756 #include <linux/irqchip/arm-gic-v3.h>
757 
758 static bool
759 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
760 {
761 	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
762 
763 	return val >= entry->min_field_value;
764 }
765 
766 static bool
767 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
768 {
769 	u64 val;
770 
771 	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
772 	if (scope == SCOPE_SYSTEM)
773 		val = read_sanitised_ftr_reg(entry->sys_reg);
774 	else
775 		val = __read_sysreg_by_encoding(entry->sys_reg);
776 
777 	return feature_matches(val, entry);
778 }
779 
780 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
781 {
782 	bool has_sre;
783 
784 	if (!has_cpuid_feature(entry, scope))
785 		return false;
786 
787 	has_sre = gic_enable_sre();
788 	if (!has_sre)
789 		pr_warn_once("%s present but disabled by higher exception level\n",
790 			     entry->desc);
791 
792 	return has_sre;
793 }
794 
795 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
796 {
797 	u32 midr = read_cpuid_id();
798 
799 	/* Cavium ThunderX pass 1.x and 2.x */
800 	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
801 		MIDR_CPU_VAR_REV(0, 0),
802 		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
803 }
804 
805 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
806 			   int __unused)
807 {
808 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
809 
810 	/*
811 	 * Activate the lower HYP offset only if:
812 	 * - the idmap doesn't clash with it,
813 	 * - the kernel is not running at EL2.
814 	 */
815 	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
816 }
817 
818 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
819 {
820 	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
821 
822 	return cpuid_feature_extract_signed_field(pfr0,
823 					ID_AA64PFR0_FP_SHIFT) < 0;
824 }
825 
826 static bool __meltdown_safe = true;
827 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
828 
829 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
830 				int scope)
831 {
832 	/* List of CPUs that are not vulnerable and don't need KPTI */
833 	static const struct midr_range kpti_safe_list[] = {
834 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
835 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
836 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
837 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
838 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
839 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
840 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
841 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
842 		{ /* sentinel */ }
843 	};
844 	char const *str = "kpti command line option";
845 	bool meltdown_safe;
846 
847 	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
848 
849 	/* Defer to CPU feature registers */
850 	if (has_cpuid_feature(entry, scope))
851 		meltdown_safe = true;
852 
853 	if (!meltdown_safe)
854 		__meltdown_safe = false;
855 
856 	/*
857 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
858 	 * ThunderX leads to apparent I-cache corruption of kernel text, which
859 	 * ends as well as you might imagine. Don't even try.
860 	 */
861 	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
862 		str = "ARM64_WORKAROUND_CAVIUM_27456";
863 		__kpti_forced = -1;
864 	}
865 
866 	/* Useful for KASLR robustness */
867 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
868 		if (!__kpti_forced) {
869 			str = "KASLR";
870 			__kpti_forced = 1;
871 		}
872 	}
873 
874 	if (cpu_mitigations_off() && !__kpti_forced) {
875 		str = "mitigations=off";
876 		__kpti_forced = -1;
877 	}
878 
879 	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
880 		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
881 		return false;
882 	}
883 
884 	/* Forced? */
885 	if (__kpti_forced) {
886 		pr_info_once("kernel page table isolation forced %s by %s\n",
887 			     __kpti_forced > 0 ? "ON" : "OFF", str);
888 		return __kpti_forced > 0;
889 	}
890 
891 	return !meltdown_safe;
892 }
893 
894 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
895 static void __nocfi
896 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
897 {
898 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
899 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
900 	kpti_remap_fn *remap_fn;
901 
902 	static bool kpti_applied = false;
903 	int cpu = smp_processor_id();
904 
905 	if (kpti_applied)
906 		return;
907 
908 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
909 
910 	cpu_install_idmap();
911 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
912 	cpu_uninstall_idmap();
913 
914 	if (!cpu)
915 		kpti_applied = true;
916 
917 	return;
918 }
919 #else
920 static void
921 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
922 {
923 }
924 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
925 
926 static int __init parse_kpti(char *str)
927 {
928 	bool enabled;
929 	int ret = strtobool(str, &enabled);
930 
931 	if (ret)
932 		return ret;
933 
934 	__kpti_forced = enabled ? 1 : -1;
935 	return 0;
936 }
937 early_param("kpti", parse_kpti);
938 
939 #ifdef CONFIG_ARM64_VHE
940 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
941 {
942 	return is_kernel_in_hyp_mode();
943 }
944 
945 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
946 {
947 	/*
948 	 * Copy register values that aren't redirected by hardware.
949 	 *
950 	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
951 	 * this value to tpidr_el2 before we patch the code. Once we've done
952 	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
953 	 * do anything here.
954 	 */
955 	if (!alternatives_applied)
956 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
957 }
958 #endif
959 
960 #ifdef CONFIG_ARM64_SSBD
961 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
962 {
963 	if (user_mode(regs))
964 		return 1;
965 
966 	if (instr & BIT(CRm_shift))
967 		regs->pstate |= PSR_SSBS_BIT;
968 	else
969 		regs->pstate &= ~PSR_SSBS_BIT;
970 
971 	arm64_skip_faulting_instruction(regs, 4);
972 	return 0;
973 }
974 
975 static struct undef_hook ssbs_emulation_hook = {
976 	.instr_mask	= ~(1U << CRm_shift),
977 	.instr_val	= 0xd500001f | REG_PSTATE_SSBS_IMM,
978 	.fn		= ssbs_emulation_handler,
979 };
980 
981 static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
982 {
983 	static bool undef_hook_registered = false;
984 	static DEFINE_SPINLOCK(hook_lock);
985 
986 	spin_lock(&hook_lock);
987 	if (!undef_hook_registered) {
988 		register_undef_hook(&ssbs_emulation_hook);
989 		undef_hook_registered = true;
990 	}
991 	spin_unlock(&hook_lock);
992 
993 	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
994 		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
995 		arm64_set_ssbd_mitigation(false);
996 	} else {
997 		arm64_set_ssbd_mitigation(true);
998 	}
999 }
1000 #endif /* CONFIG_ARM64_SSBD */
1001 
1002 static const struct arm64_cpu_capabilities arm64_features[] = {
1003 	{
1004 		.desc = "GIC system register CPU interface",
1005 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1006 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1007 		.matches = has_useable_gicv3_cpuif,
1008 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1009 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
1010 		.sign = FTR_UNSIGNED,
1011 		.min_field_value = 1,
1012 	},
1013 #ifdef CONFIG_ARM64_PAN
1014 	{
1015 		.desc = "Privileged Access Never",
1016 		.capability = ARM64_HAS_PAN,
1017 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1018 		.matches = has_cpuid_feature,
1019 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
1020 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
1021 		.sign = FTR_UNSIGNED,
1022 		.min_field_value = 1,
1023 		.cpu_enable = cpu_enable_pan,
1024 	},
1025 #endif /* CONFIG_ARM64_PAN */
1026 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1027 	{
1028 		.desc = "LSE atomic instructions",
1029 		.capability = ARM64_HAS_LSE_ATOMICS,
1030 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1031 		.matches = has_cpuid_feature,
1032 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
1033 		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1034 		.sign = FTR_UNSIGNED,
1035 		.min_field_value = 2,
1036 	},
1037 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1038 	{
1039 		.desc = "Software prefetching using PRFM",
1040 		.capability = ARM64_HAS_NO_HW_PREFETCH,
1041 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1042 		.matches = has_no_hw_prefetch,
1043 	},
1044 #ifdef CONFIG_ARM64_UAO
1045 	{
1046 		.desc = "User Access Override",
1047 		.capability = ARM64_HAS_UAO,
1048 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1049 		.matches = has_cpuid_feature,
1050 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
1051 		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
1052 		.min_field_value = 1,
1053 		/*
1054 		 * We rely on stop_machine() calling uao_thread_switch() to set
1055 		 * UAO immediately after patching.
1056 		 */
1057 	},
1058 #endif /* CONFIG_ARM64_UAO */
1059 #ifdef CONFIG_ARM64_PAN
1060 	{
1061 		.capability = ARM64_ALT_PAN_NOT_UAO,
1062 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1063 		.matches = cpufeature_pan_not_uao,
1064 	},
1065 #endif /* CONFIG_ARM64_PAN */
1066 #ifdef CONFIG_ARM64_VHE
1067 	{
1068 		.desc = "Virtualization Host Extensions",
1069 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
1070 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1071 		.matches = runs_at_el2,
1072 		.cpu_enable = cpu_copy_el2regs,
1073 	},
1074 #endif	/* CONFIG_ARM64_VHE */
1075 	{
1076 		.desc = "32-bit EL0 Support",
1077 		.capability = ARM64_HAS_32BIT_EL0,
1078 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1079 		.matches = has_cpuid_feature,
1080 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1081 		.sign = FTR_UNSIGNED,
1082 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
1083 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1084 	},
1085 	{
1086 		.desc = "Reduced HYP mapping offset",
1087 		.capability = ARM64_HYP_OFFSET_LOW,
1088 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1089 		.matches = hyp_offset_low,
1090 	},
1091 	{
1092 		.desc = "Kernel page table isolation (KPTI)",
1093 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
1094 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1095 		/*
1096 		 * The ID feature fields below are used to indicate that
1097 		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1098 		 * more details.
1099 		 */
1100 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1101 		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
1102 		.min_field_value = 1,
1103 		.matches = unmap_kernel_at_el0,
1104 		.cpu_enable = kpti_install_ng_mappings,
1105 	},
1106 	{
1107 		/* FP/SIMD is not implemented */
1108 		.capability = ARM64_HAS_NO_FPSIMD,
1109 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1110 		.min_field_value = 0,
1111 		.matches = has_no_fpsimd,
1112 	},
1113 #ifdef CONFIG_ARM64_PMEM
1114 	{
1115 		.desc = "Data cache clean to Point of Persistence",
1116 		.capability = ARM64_HAS_DCPOP,
1117 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1118 		.matches = has_cpuid_feature,
1119 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
1120 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
1121 		.min_field_value = 1,
1122 	},
1123 #endif
1124 #ifdef CONFIG_ARM64_SSBD
1125 	{
1126 		.desc = "Speculative Store Bypassing Safe (SSBS)",
1127 		.capability = ARM64_SSBS,
1128 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1129 		.matches = has_cpuid_feature,
1130 		.sys_reg = SYS_ID_AA64PFR1_EL1,
1131 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
1132 		.sign = FTR_UNSIGNED,
1133 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1134 		.cpu_enable = cpu_enable_ssbs,
1135 	},
1136 #endif
1137 	{},
1138 };
1139 
1140 
1141 #define HWCAP_CPUID_MATCH(reg, field, s, min_value)		\
1142 		.matches = has_cpuid_feature,			\
1143 		.sys_reg = reg,					\
1144 		.field_pos = field,				\
1145 		.sign = s,					\
1146 		.min_field_value = min_value,			\
1147 
1148 #define __HWCAP_CAP(name, cap_type, cap)			\
1149 		.desc = name,					\
1150 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,		\
1151 		.hwcap_type = cap_type,				\
1152 		.hwcap = cap,					\
1153 
1154 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)	\
1155 	{							\
1156 		__HWCAP_CAP(#cap, cap_type, cap)		\
1157 		HWCAP_CPUID_MATCH(reg, field, s, min_value)	\
1158 	}
1159 
1160 #define HWCAP_CAP_MATCH(match, cap_type, cap)			\
1161 	{							\
1162 		__HWCAP_CAP(#cap, cap_type, cap)		\
1163 		.matches = match,				\
1164 	}
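
/*
 * For instance, HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 * FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES) below builds a capability that
 * matches via has_cpuid_feature() when the unsigned AES field of the
 * sanitised ID_AA64ISAR0_EL1 value is >= 1 and, if so, ends up setting
 * HWCAP_AES in elf_hwcap, with the description "HWCAP_AES".
 */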
1165 
1166 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1167 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1168 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1169 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1170 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1171 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
1172 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1173 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1174 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
1175 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
1176 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
1177 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
1178 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
1179 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
1180 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
1181 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1182 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1183 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1184 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1185 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
1186 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1187 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1188 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1189 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1190 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
1191 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
1192 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
1193 	{},
1194 };
1195 
1196 #ifdef CONFIG_COMPAT
1197 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
1198 {
1199 	/*
1200 	 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
1201 	 * in line with the arm32 check in vfp_init(). We make sure that the
1202 	 * check is future proof by requiring a non-zero value.
1203 	 */
1204 	u32 mvfr1;
1205 
1206 	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1207 	if (scope == SCOPE_SYSTEM)
1208 		mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
1209 	else
1210 		mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
1211 
1212 	return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
1213 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
1214 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
1215 }
1216 #endif
1217 
1218 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1219 #ifdef CONFIG_COMPAT
1220 	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
1221 	HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
1222 	/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
1223 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
1224 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
1225 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1226 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1227 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1228 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1229 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1230 #endif
1231 	{},
1232 };
1233 
1234 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1235 {
1236 	switch (cap->hwcap_type) {
1237 	case CAP_HWCAP:
1238 		elf_hwcap |= cap->hwcap;
1239 		break;
1240 #ifdef CONFIG_COMPAT
1241 	case CAP_COMPAT_HWCAP:
1242 		compat_elf_hwcap |= (u32)cap->hwcap;
1243 		break;
1244 	case CAP_COMPAT_HWCAP2:
1245 		compat_elf_hwcap2 |= (u32)cap->hwcap;
1246 		break;
1247 #endif
1248 	default:
1249 		WARN_ON(1);
1250 		break;
1251 	}
1252 }
1253 
1254 /* Check if we have a particular HWCAP enabled */
1255 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1256 {
1257 	bool rc;
1258 
1259 	switch (cap->hwcap_type) {
1260 	case CAP_HWCAP:
1261 		rc = (elf_hwcap & cap->hwcap) != 0;
1262 		break;
1263 #ifdef CONFIG_COMPAT
1264 	case CAP_COMPAT_HWCAP:
1265 		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1266 		break;
1267 	case CAP_COMPAT_HWCAP2:
1268 		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1269 		break;
1270 #endif
1271 	default:
1272 		WARN_ON(1);
1273 		rc = false;
1274 	}
1275 
1276 	return rc;
1277 }
1278 
1279 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1280 {
1281 	/* We support emulation of accesses to CPU ID feature registers */
1282 	elf_hwcap |= HWCAP_CPUID;
1283 	for (; hwcaps->matches; hwcaps++)
1284 		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1285 			cap_set_elf_hwcap(hwcaps);
1286 }
1287 
1288 /*
1289  * Check if the current CPU has a given feature capability.
1290  * Should be called from non-preemptible context.
1291  */
1292 static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1293 			       unsigned int cap)
1294 {
1295 	const struct arm64_cpu_capabilities *caps;
1296 
1297 	if (WARN_ON(preemptible()))
1298 		return false;
1299 
1300 	for (caps = cap_array; caps->matches; caps++)
1301 		if (caps->capability == cap &&
1302 		    caps->matches(caps, SCOPE_LOCAL_CPU))
1303 			return true;
1304 	return false;
1305 }
1306 
1307 static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1308 				      u16 scope_mask, const char *info)
1309 {
1310 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1311 	for (; caps->matches; caps++) {
1312 		if (!(caps->type & scope_mask) ||
1313 		    !caps->matches(caps, cpucap_default_scope(caps)))
1314 			continue;
1315 
1316 		if (!cpus_have_cap(caps->capability) && caps->desc)
1317 			pr_info("%s %s\n", info, caps->desc);
1318 		cpus_set_cap(caps->capability);
1319 	}
1320 }
1321 
1322 static void update_cpu_capabilities(u16 scope_mask)
1323 {
1324 	__update_cpu_capabilities(arm64_errata, scope_mask,
1325 				  "enabling workaround for");
1326 	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1327 }
1328 
1329 static int __enable_cpu_capability(void *arg)
1330 {
1331 	const struct arm64_cpu_capabilities *cap = arg;
1332 
1333 	cap->cpu_enable(cap);
1334 	return 0;
1335 }
1336 
1337 /*
1338  * Run through the enabled capabilities and enable() it on all active
1339  * CPUs
1340  */
1341 static void __init
1342 __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1343 			  u16 scope_mask)
1344 {
1345 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1346 	for (; caps->matches; caps++) {
1347 		unsigned int num = caps->capability;
1348 
1349 		if (!(caps->type & scope_mask) || !cpus_have_cap(num))
1350 			continue;
1351 
1352 		/* Ensure cpus_have_const_cap(num) works */
1353 		static_branch_enable(&cpu_hwcap_keys[num]);
1354 
1355 		if (caps->cpu_enable) {
1356 			/*
1357 			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
1358 			 * before any secondary CPU boots. Thus, each secondary
1359 			 * will enable the capability as appropriate via
1360 			 * check_local_cpu_capabilities(). The only exception is
1361 			 * the boot CPU, for which the capability must be
1362 			 * enabled here. This approach avoids costly
1363 			 * stop_machine() calls for this case.
1364 			 *
1365 			 * Otherwise, use stop_machine() as it schedules the
1366 			 * work allowing us to modify PSTATE, instead of
1367 			 * on_each_cpu() which uses an IPI, giving us a PSTATE
1368 			 * that disappears when we return.
1369 			 */
1370 			if (scope_mask & SCOPE_BOOT_CPU)
1371 				caps->cpu_enable(caps);
1372 			else
1373 				stop_machine(__enable_cpu_capability,
1374 					     (void *)caps, cpu_online_mask);
1375 		}
1376 	}
1377 }
1378 
1379 static void __init enable_cpu_capabilities(u16 scope_mask)
1380 {
1381 	__enable_cpu_capabilities(arm64_errata, scope_mask);
1382 	__enable_cpu_capabilities(arm64_features, scope_mask);
1383 }
1384 
1385 /*
1386  * Flag to indicate if we have computed the system wide
1387  * capabilities based on the boot time active CPUs. This
1388  * will be used to determine if a new booting CPU should
1389  * go through the verification process to make sure that it
1390  * supports the system capabilities, without using a hotplug
1391  * notifier.
1392  */
1393 static bool sys_caps_initialised;
1394 
1395 static inline void set_sys_caps_initialised(void)
1396 {
1397 	sys_caps_initialised = true;
1398 }
1399 
1400 /*
1401  * Run through the list of capabilities to check for conflicts.
1402  * If the system has already detected a capability, take necessary
1403  * action on this CPU.
1404  *
1405  * Returns "false" on conflicts.
1406  */
1407 static bool
1408 __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list,
1409 			u16 scope_mask)
1410 {
1411 	bool cpu_has_cap, system_has_cap;
1412 	const struct arm64_cpu_capabilities *caps;
1413 
1414 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1415 
1416 	for (caps = caps_list; caps->matches; caps++) {
1417 		if (!(caps->type & scope_mask))
1418 			continue;
1419 
1420 		cpu_has_cap = __this_cpu_has_cap(caps_list, caps->capability);
1421 		system_has_cap = cpus_have_cap(caps->capability);
1422 
1423 		if (system_has_cap) {
1424 			/*
1425 			 * Check if the new CPU misses an advertised feature,
1426 			 * which is not safe to miss.
1427 			 */
1428 			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
1429 				break;
1430 			/*
1431 			 * We have to issue cpu_enable() irrespective of
1432 			 * whether the CPU has it or not, as it is enabled
1433 			 * system-wide. It is up to the callback to take
1434 			 * appropriate action on this CPU.
1435 			 */
1436 			if (caps->cpu_enable)
1437 				caps->cpu_enable(caps);
1438 		} else {
1439 			/*
1440 			 * Check if the CPU has a capability that the system has
1441 			 * not enabled and that a late CPU is not permitted to have.
1442 			 */
1443 			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
1444 				break;
1445 		}
1446 	}
1447 
1448 	if (caps->matches) {
1449 		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
1450 			smp_processor_id(), caps->capability,
1451 			caps->desc, system_has_cap, cpu_has_cap);
1452 		return false;
1453 	}
1454 
1455 	return true;
1456 }
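
/*
 * In short: a late CPU missing a capability the system has already enabled is
 * only tolerated if the capability is ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU, and
 * a late CPU having a capability the system has not enabled is only tolerated
 * if it is ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU (as tested via
 * cpucap_late_cpu_optional() and cpucap_late_cpu_permitted()).
 */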
1457 
1458 static bool verify_local_cpu_caps(u16 scope_mask)
1459 {
1460 	return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
1461 	       __verify_local_cpu_caps(arm64_features, scope_mask);
1462 }
1463 
1464 /*
1465  * Check for CPU features that are used in early boot
1466  * based on the boot CPU's values.
1467  */
1468 static void check_early_cpu_features(void)
1469 {
1470 	verify_cpu_asid_bits();
1471 	/*
1472 	 * Early features are used by the kernel already. If there
1473 	 * is a conflict, we cannot proceed further.
1474 	 */
1475 	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1476 		cpu_panic_kernel();
1477 }
1478 
1479 static void
1480 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1481 {
1482 
1483 	for (; caps->matches; caps++)
1484 		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1485 			pr_crit("CPU%d: missing HWCAP: %s\n",
1486 					smp_processor_id(), caps->desc);
1487 			cpu_die_early();
1488 		}
1489 }
1490 
1491 
1492 /*
1493  * Run through the enabled system capabilities and enable each of them on this
1494  * CPU. The capabilities were decided based on the CPUs available at boot time.
1495  * Any new CPU should match the system-wide status of the capability. If the
1496  * new CPU doesn't have a capability which the system now has enabled, we
1497  * cannot do anything to fix it up and could cause unexpected failures. So
1498  * we park the CPU.
1499  */
1500 static void verify_local_cpu_capabilities(void)
1501 {
1502 	/*
1503 	 * The capabilities with SCOPE_BOOT_CPU are checked from
1504 	 * check_early_cpu_features(), as they need to be verified
1505 	 * on all secondary CPUs.
1506 	 */
1507 	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1508 		cpu_die_early();
1509 
1510 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
1511 	if (system_supports_32bit_el0())
1512 		verify_local_elf_hwcaps(compat_elf_hwcaps);
1513 }
1514 
1515 void check_local_cpu_capabilities(void)
1516 {
1517 	/*
1518 	 * All secondary CPUs should conform to the early CPU features
1519 	 * in use by the kernel, as established by the boot CPU.
1520 	 */
1521 	check_early_cpu_features();
1522 
1523 	/*
1524 	 * If we haven't finalised the system capabilities, this CPU gets
1525 	 * a chance to update the errata workarounds and local features.
1526 	 * Otherwise, this CPU should verify that it has all the system
1527 	 * advertised capabilities.
1528 	 */
1529 	if (!sys_caps_initialised)
1530 		update_cpu_capabilities(SCOPE_LOCAL_CPU);
1531 	else
1532 		verify_local_cpu_capabilities();
1533 }
1534 
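/*
 * Detect and enable the boot CPU's capabilities. SCOPE_LOCAL_CPU capabilities
 * detected here are only recorded at this point; they are enabled together
 * with the SCOPE_SYSTEM ones once the system-wide state has been finalised
 * (see setup_system_capabilities() below).
 */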
1535 static void __init setup_boot_cpu_capabilities(void)
1536 {
1537 	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
1538 	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
1539 	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
1540 	enable_cpu_capabilities(SCOPE_BOOT_CPU);
1541 }
1542 
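/*
 * Once arm64_const_caps_ready is enabled, cpus_have_const_cap() switches from
 * testing the cpu_hwcaps bitmap to using the per-capability static keys.
 */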
1543 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1544 EXPORT_SYMBOL(arm64_const_caps_ready);
1545 
1546 static void __init mark_const_caps_ready(void)
1547 {
1548 	static_branch_enable(&arm64_const_caps_ready);
1549 }
1550 
1551 extern const struct arm64_cpu_capabilities arm64_errata[];
1552 
1553 bool this_cpu_has_cap(unsigned int cap)
1554 {
1555 	return (__this_cpu_has_cap(arm64_features, cap) ||
1556 		__this_cpu_has_cap(arm64_errata, cap));
1557 }
1558 
1559 static void __init setup_system_capabilities(void)
1560 {
1561 	/*
1562 	 * We have finalised the system-wide safe feature
1563 	 * registers; now finalise the capabilities that depend
1564 	 * on them. Also enable all the available capabilities
1565 	 * that are not enabled already.
1566 	 */
1567 	update_cpu_capabilities(SCOPE_SYSTEM);
1568 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
1569 }
1570 
1571 void __init setup_cpu_features(void)
1572 {
1573 	u32 cwg;
1574 	int cls;
1575 
1576 	setup_system_capabilities();
1577 	mark_const_caps_ready();
1578 	setup_elf_hwcaps(arm64_elf_hwcaps);
1579 
1580 	if (system_supports_32bit_el0())
1581 		setup_elf_hwcaps(compat_elf_hwcaps);
1582 
1583 	/* Advertise that we have computed the system capabilities */
1584 	set_sys_caps_initialised();
1585 
1586 	/*
1587 	 * Check for sane CTR_EL0.CWG value.
1588 	 */
1589 	cwg = cache_type_cwg();
1590 	cls = cache_line_size();
1591 	if (!cwg)
1592 		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
1593 			cls);
1594 	if (L1_CACHE_BYTES < cls)
1595 		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
1596 			L1_CACHE_BYTES, cls);
1597 }
1598 
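/*
 * Matches callback for the ARM64_ALT_PAN_NOT_UAO meta-capability: the
 * PAN-based uaccess alternatives are only applied when the CPU has PAN
 * but lacks UAO.
 */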
1599 static bool __maybe_unused
1600 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1601 {
1602 	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1603 }
1604 
1605 /*
1606  * We emulate only the following system register space.
1607  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
1608  * See Table C5-6 System instruction encodings for System register accesses,
1609  * ARMv8 ARM(ARM DDI 0487A.f) for more details.
1610  */
1611 static inline bool __attribute_const__ is_emulated(u32 id)
1612 {
1613 	return (sys_reg_Op0(id) == 0x3 &&
1614 		sys_reg_CRn(id) == 0x0 &&
1615 		sys_reg_Op1(id) == 0x0 &&
1616 		(sys_reg_CRm(id) == 0 ||
1617 		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
1618 }
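
/*
 * For example, ID_AA64PFR0_EL1 (Op0=3, Op1=0, CRn=0, CRm=4, Op2=0) falls
 * within the emulated range above, while the CRm == 0 registers (MIDR_EL1
 * etc.) are handled separately by emulate_id_reg() below.
 */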
1619 
1620 /*
1621  * With CRm == 0, reg should be one of:
1622  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
1623  */
1624 static inline int emulate_id_reg(u32 id, u64 *valp)
1625 {
1626 	switch (id) {
1627 	case SYS_MIDR_EL1:
1628 		*valp = read_cpuid_id();
1629 		break;
1630 	case SYS_MPIDR_EL1:
1631 		*valp = SYS_MPIDR_SAFE_VAL;
1632 		break;
1633 	case SYS_REVIDR_EL1:
1634 		/* IMPLEMENTATION DEFINED values are emulated with 0 */
1635 		*valp = 0;
1636 		break;
1637 	default:
1638 		return -EINVAL;
1639 	}
1640 
1641 	return 0;
1642 }
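
/*
 * Note: MPIDR_EL1 is deliberately emulated with a constant safe value; a task
 * can migrate between CPUs, so reporting the real per-CPU affinity to
 * userspace would be misleading.
 */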
1643 
1644 static int emulate_sys_reg(u32 id, u64 *valp)
1645 {
1646 	struct arm64_ftr_reg *regp;
1647 
1648 	if (!is_emulated(id))
1649 		return -EINVAL;
1650 
1651 	if (sys_reg_CRm(id) == 0)
1652 		return emulate_id_reg(id, valp);
1653 
1654 	regp = get_arm64_ftr_reg(id);
1655 	if (regp)
1656 		*valp = arm64_ftr_reg_user_value(regp);
1657 	else
1658 		/*
1659 		 * The untracked registers are either IMPLEMENTATION DEFINED
1660 		 * (e.g, ID_AFR0_EL1) or reserved RAZ.
1661 		 */
1662 		*valp = 0;
1663 	return 0;
1664 }
1665 
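/*
 * Illustrative sketch (not part of this file): with the undef hook below
 * registered, EL0 code such as
 *
 *	u64 isar0;
 *	asm volatile("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 *
 * traps as an undefined instruction and is emulated here, returning the
 * sanitised, user-visible view of the register.
 */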
1666 static int emulate_mrs(struct pt_regs *regs, u32 insn)
1667 {
1668 	int rc;
1669 	u32 sys_reg, dst;
1670 	u64 val;
1671 
1672 	/*
1673 	 * sys_reg values are defined as used in mrs/msr instructions.
1674 	 * Shift the imm value to get the encoding.
1675 	 */
1676 	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1677 	rc = emulate_sys_reg(sys_reg, &val);
1678 	if (!rc) {
1679 		dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1680 		pt_regs_write_reg(regs, dst, val);
1681 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1682 	}
1683 
1684 	return rc;
1685 }
1686 
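/*
 * Bits [31:20] of an MRS instruction encode as 0xd53, hence the mask/value
 * pair below; the pstate constraints restrict the hook to AArch64 EL0.
 */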
1687 static struct undef_hook mrs_hook = {
1688 	.instr_mask = 0xfff00000,
1689 	.instr_val  = 0xd5300000,
1690 	.pstate_mask = COMPAT_PSR_MODE_MASK,
1691 	.pstate_val = PSR_MODE_EL0t,
1692 	.fn = emulate_mrs,
1693 };
1694 
1695 static int __init enable_mrs_emulation(void)
1696 {
1697 	register_undef_hook(&mrs_hook);
1698 	return 0;
1699 }
1700 
1701 core_initcall(enable_mrs_emulation);
1702 
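/*
 * Backs /sys/devices/system/cpu/vulnerabilities/meltdown.
 */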
1703 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
1704 			  char *buf)
1705 {
1706 	if (__meltdown_safe)
1707 		return sprintf(buf, "Not affected\n");
1708 
1709 	if (arm64_kernel_unmapped_at_el0())
1710 		return sprintf(buf, "Mitigation: PTI\n");
1711 
1712 	return sprintf(buf, "Vulnerable\n");
1713 }
1714