1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Contains CPU feature definitions
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 *
7 * A note for the weary kernel hacker: the code here is confusing and hard to
8 * follow! That's partly because it's solving a nasty problem, but also because
9 * there's a little bit of over-abstraction that tends to obscure what's going
10 * on behind a maze of helper functions and macros.
11 *
12 * The basic problem is that hardware folks have started gluing together CPUs
13 * with distinct architectural features; in some cases even creating SoCs where
14 * user-visible instructions are available only on a subset of the available
15 * cores. We try to address this by snapshotting the feature registers of the
16 * boot CPU and comparing these with the feature registers of each secondary
17 * CPU when bringing them up. If there is a mismatch, then we update the
18 * snapshot state to indicate the lowest common denominator of the feature,
19 * known as the "safe" value. This snapshot state can be queried to view the
20 * "sanitised" value of a feature register.
21 *
22 * The sanitised register values are used to decide which capabilities we
23 * have in the system. These may be in the form of traditional "hwcaps"
24 * advertised to userspace or internal "cpucaps" which are used to configure
25 * things like alternative patching and static keys. While a feature mismatch
26 * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
27 * may prevent a CPU from being onlined at all.
28 *
29 * Some implementation details worth remembering:
30 *
31 * - Mismatched features are *always* sanitised to a "safe" value, which
32 * usually indicates that the feature is not supported.
33 *
34 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
35 * warning when onlining an offending CPU and the kernel will be tainted
36 * with TAINT_CPU_OUT_OF_SPEC.
37 *
38 * - Features marked as FTR_VISIBLE have their sanitised value visible to
39 * userspace. FTR_VISIBLE features in registers that are only visible
40 * to EL0 by trapping *must* have a corresponding HWCAP so that late
41 * onlining of CPUs cannot lead to features disappearing at runtime.
42 *
43 * - A "feature" is typically a 4-bit register field. A "capability" is the
44 * high-level description derived from the sanitised field value.
45 *
46 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
47 * scheme for fields in ID registers") to understand when feature fields
48 * may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
49 *
50 * - KVM exposes its own view of the feature registers to guest operating
51 * systems regardless of FTR_VISIBLE. This is typically driven from the
52 * sanitised register values to allow virtual CPUs to be migrated between
53 * arbitrary physical CPUs, but some features not present on the host are
54 * also advertised and emulated. Look at sys_reg_descs[] for the gory
55 * details.
56 *
57 * - If the arm64_ftr_bits[] for a register has a missing field, then this
58 * field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
59 * This is stronger than FTR_HIDDEN and can be used to hide features from
60 * KVM guests.
61 */
62
63 #define pr_fmt(fmt) "CPU features: " fmt
64
65 #include <linux/bsearch.h>
66 #include <linux/cpumask.h>
67 #include <linux/crash_dump.h>
68 #include <linux/kstrtox.h>
69 #include <linux/sort.h>
70 #include <linux/stop_machine.h>
71 #include <linux/sysfs.h>
72 #include <linux/types.h>
73 #include <linux/minmax.h>
74 #include <linux/mm.h>
75 #include <linux/cpu.h>
76 #include <linux/kasan.h>
77 #include <linux/percpu.h>
78
79 #include <asm/cpu.h>
80 #include <asm/cpufeature.h>
81 #include <asm/cpu_ops.h>
82 #include <asm/fpsimd.h>
83 #include <asm/hwcap.h>
84 #include <asm/insn.h>
85 #include <asm/kvm_host.h>
86 #include <asm/mmu_context.h>
87 #include <asm/mte.h>
88 #include <asm/processor.h>
89 #include <asm/smp.h>
90 #include <asm/sysreg.h>
91 #include <asm/traps.h>
92 #include <asm/vectors.h>
93 #include <asm/virt.h>
94
95 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
96 static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;
97
98 #ifdef CONFIG_COMPAT
99 #define COMPAT_ELF_HWCAP_DEFAULT \
100 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
101 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
102 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
103 COMPAT_HWCAP_LPAE)
104 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
105 unsigned int compat_elf_hwcap2 __read_mostly;
106 #endif
107
108 DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
109 EXPORT_SYMBOL(system_cpucaps);
110 static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS];
111
112 DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
113
114 /*
115 * arm64_use_ng_mappings must be placed in the .data section; otherwise it
116 * ends up in the .bss section where it is initialized in early_map_kernel()
117 * after the MMU (with the idmap) was enabled. create_init_idmap() - which
118 * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
119 * may end up generating incorrect idmap page table attributes.
120 */
121 bool arm64_use_ng_mappings __read_mostly = false;
122 EXPORT_SYMBOL(arm64_use_ng_mappings);
123
124 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
125
126 /*
127 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
128 * support it?
129 */
130 static bool __read_mostly allow_mismatched_32bit_el0;
131
132 /*
133 * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
134 * seen at least one CPU capable of 32-bit EL0.
135 */
136 DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
137 EXPORT_SYMBOL(arm64_mismatched_32bit_el0);
138
139 /*
140 * Mask of CPUs supporting 32-bit EL0.
141 * Only valid if arm64_mismatched_32bit_el0 is enabled.
142 */
143 static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
144
145 void dump_cpu_features(void)
146 {
147 /* file-wide pr_fmt adds "CPU features: " prefix */
148 pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps);
149 }
150
151 #define __ARM64_MAX_POSITIVE(reg, field) \
152 ((reg##_##field##_SIGNED ? \
153 BIT(reg##_##field##_WIDTH - 1) : \
154 BIT(reg##_##field##_WIDTH)) - 1)
155
156 #define __ARM64_MIN_NEGATIVE(reg, field) BIT(reg##_##field##_WIDTH - 1)
157
158 #define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value) \
159 .sys_reg = SYS_##reg, \
160 .field_pos = reg##_##field##_SHIFT, \
161 .field_width = reg##_##field##_WIDTH, \
162 .sign = reg##_##field##_SIGNED, \
163 .min_field_value = min_value, \
164 .max_field_value = max_value,
165
166 /*
167 * ARM64_CPUID_FIELDS() encodes a field with a range from min_value to
168 * an implicit maximum that depends on the signedness of the field.
169 *
170 * An unsigned field will be capped at all ones, while a signed field
171 * will be limited to the positive half only.
172 */
173 #define ARM64_CPUID_FIELDS(reg, field, min_value) \
174 __ARM64_CPUID_FIELDS(reg, field, \
175 SYS_FIELD_VALUE(reg, field, min_value), \
176 __ARM64_MAX_POSITIVE(reg, field))
177
178 /*
179 * ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an
180 * implicit minimum value to max_value. This should be used when
181 * matching a non-implemented property.
182 */
183 #define ARM64_CPUID_FIELDS_NEG(reg, field, max_value) \
184 __ARM64_CPUID_FIELDS(reg, field, \
185 __ARM64_MIN_NEGATIVE(reg, field), \
186 SYS_FIELD_VALUE(reg, field, max_value))
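/*
 * Rough illustration (hypothetical example, mirroring how capability entries
 * later in this file use the macro): a capability that requires LSE atomics
 * to be implemented could encode its ID register requirement as
 *
 *	ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
 *
 * which expands to the sys_reg/field_pos/field_width/sign/min/max members
 * consumed by has_cpuid_feature().
 */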
187
188 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
189 { \
190 .sign = SIGNED, \
191 .visible = VISIBLE, \
192 .strict = STRICT, \
193 .type = TYPE, \
194 .shift = SHIFT, \
195 .width = WIDTH, \
196 .safe_val = SAFE_VAL, \
197 }
198
199 /* Define a feature with unsigned values */
200 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
201 __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
202
203 /* Define a feature with a signed value */
204 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
205 __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
206
207 #define ARM64_FTR_END \
208 { \
209 .width = 0, \
210 }
211
212 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
213
214 static bool __system_matches_cap(unsigned int n);
215
216 /*
217 * NOTE: Any changes to the visibility of features should be kept in
218 * sync with the documentation of the CPU feature register ABI.
219 */
220 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
221 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
222 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
223 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
224 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
225 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
226 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
227 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
228 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
229 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
230 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
231 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
232 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
233 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
234 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
235 ARM64_FTR_END,
236 };
237
238 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
239 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
240 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
241 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
242 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
243 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0),
244 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0),
245 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0),
246 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
247 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0),
248 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
249 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0),
250 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0),
251 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0),
252 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0),
253 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
254 FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0),
255 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
256 FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0),
257 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0),
258 ARM64_FTR_END,
259 };
260
261 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
262 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_LUT_SHIFT, 4, 0),
263 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
264 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
265 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
266 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
267 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
268 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
269 FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
270 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
271 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
272 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
273 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
274 ARM64_FTR_END,
275 };
276
277 static const struct arm64_ftr_bits ftr_id_aa64isar3[] = {
278 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0),
279 ARM64_FTR_END,
280 };
281
282 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
283 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
284 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
285 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
286 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
287 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
288 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
289 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
290 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
291 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
292 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
293 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
294 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
295 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
296 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
297 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP),
298 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP),
299 ARM64_FTR_END,
300 };
301
302 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
303 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
304 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
305 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
306 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
307 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
308 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
309 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
310 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
311 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
312 ARM64_FTR_END,
313 };
314
315 static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
316 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
317 ARM64_FTR_END,
318 };
319
320 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
321 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
322 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
323 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
324 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0),
325 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
326 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0),
327 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
328 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0),
329 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
330 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
331 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
332 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0),
333 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
334 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
335 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
336 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
337 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
338 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0),
339 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
340 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0),
341 ARM64_FTR_END,
342 };
343
344 static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
345 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
346 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
347 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
348 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_LUTv2_SHIFT, 1, 0),
349 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
350 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
351 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
352 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
353 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
354 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0),
355 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
356 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0),
357 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
358 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0),
359 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
360 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0),
361 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
362 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F16_SHIFT, 1, 0),
363 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
364 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F32_SHIFT, 1, 0),
365 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
366 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
367 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
368 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
369 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
370 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0),
371 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
372 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0),
373 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
374 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
375 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
376 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8FMA_SHIFT, 1, 0),
377 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
378 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP4_SHIFT, 1, 0),
379 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
380 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP2_SHIFT, 1, 0),
381 ARM64_FTR_END,
382 };
383
384 static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = {
385 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8CVT_SHIFT, 1, 0),
386 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8FMA_SHIFT, 1, 0),
387 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP4_SHIFT, 1, 0),
388 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP2_SHIFT, 1, 0),
389 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E4M3_SHIFT, 1, 0),
390 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E5M2_SHIFT, 1, 0),
391 ARM64_FTR_END,
392 };
393
394 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
395 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
396 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
397 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
398 /*
399 * Page size not being supported at Stage-2 is not fatal. You
400 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
401 * your favourite nesting hypervisor.
402 *
403 * There is a small corner case where the hypervisor explicitly
404 * advertises a given granule size at Stage-2 (value 2) on some
405 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
406 * vCPUs. Although this is not forbidden by the architecture, it
407 * indicates that the hypervisor is being silly (or buggy).
408 *
409 * We make no effort to cope with this and pretend that if these
410 * fields are inconsistent across vCPUs, then it isn't worth
411 * trying to bring KVM up.
412 */
413 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
414 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
415 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
416 /*
417 * We already refuse to boot CPUs that don't support our configured
418 * page size, so we can only detect mismatches for a page size other
419 * than the one we're currently using. Unfortunately, SoCs like this
420 * exist in the wild so, even though we don't like it, we'll have to go
421 * along with it and treat them as non-strict.
422 */
423 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
424 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
425 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
426
427 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
428 /* Linux shouldn't care about secure memory */
429 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
430 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
431 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
432 /*
433 * Differing PARange is fine as long as all peripherals and memory are mapped
434 * within the minimum PARange of all CPUs
435 */
436 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
437 ARM64_FTR_END,
438 };
439
440 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
441 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ECBHB_SHIFT, 4, 0),
442 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
443 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
444 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
445 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
446 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
447 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
448 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
449 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
450 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
451 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
452 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
453 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
454 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
455 ARM64_FTR_END,
456 };
457
458 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
459 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
460 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
461 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
462 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
463 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
464 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
465 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
466 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
467 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
468 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
469 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
470 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
471 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
472 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
473 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
474 ARM64_FTR_END,
475 };
476
477 static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
478 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE),
479 FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0),
480 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
481 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
482 ARM64_FTR_END,
483 };
484
485 static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
486 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
487 ARM64_FTR_END,
488 };
489
490 static const struct arm64_ftr_bits ftr_ctr[] = {
491 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
492 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
493 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1),
494 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
495 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
496 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
497 /*
498 * Linux can handle differing I-cache policies. Userspace JITs will
499 * make use of *minLine.
500 * If we have differing I-cache policies, report it as the weakest - VIPT.
501 */
502 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT), /* L1Ip */
503 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
504 ARM64_FTR_END,
505 };
506
507 static struct arm64_ftr_override __ro_after_init no_override = { };
508
509 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
510 .name = "SYS_CTR_EL0",
511 .ftr_bits = ftr_ctr,
512 .override = &no_override,
513 };
514
515 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
516 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf),
517 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0),
518 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0),
519 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0),
520 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0),
521 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf),
522 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0),
523 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0),
524 ARM64_FTR_END,
525 };
526
527 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
528 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
529 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
530 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
531 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
532 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
533 /*
534 * We can instantiate multiple PMU instances with different levels
535 * of support.
536 */
537 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
538 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
539 ARM64_FTR_END,
540 };
541
542 static const struct arm64_ftr_bits ftr_mvfr0[] = {
543 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0),
544 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0),
545 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0),
546 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0),
547 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0),
548 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0),
549 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0),
550 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0),
551 ARM64_FTR_END,
552 };
553
554 static const struct arm64_ftr_bits ftr_mvfr1[] = {
555 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
556 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
557 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
558 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
559 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
560 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
561 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0),
562 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0),
563 ARM64_FTR_END,
564 };
565
566 static const struct arm64_ftr_bits ftr_mvfr2[] = {
567 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0),
568 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0),
569 ARM64_FTR_END,
570 };
571
572 static const struct arm64_ftr_bits ftr_dczid[] = {
573 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1),
574 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0),
575 ARM64_FTR_END,
576 };
577
578 static const struct arm64_ftr_bits ftr_gmid[] = {
579 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0),
580 ARM64_FTR_END,
581 };
582
583 static const struct arm64_ftr_bits ftr_id_isar0[] = {
584 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0),
585 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0),
586 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0),
587 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0),
588 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0),
589 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0),
590 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0),
591 ARM64_FTR_END,
592 };
593
594 static const struct arm64_ftr_bits ftr_id_isar5[] = {
595 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0),
596 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0),
597 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0),
598 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA1_SHIFT, 4, 0),
599 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0),
600 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0),
601 ARM64_FTR_END,
602 };
603
604 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
605 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0),
606 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0),
607 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0),
608 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0),
609 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0),
610 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0),
611 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0),
612
613 /*
614 * SpecSEI = 1 indicates that the PE might generate an SError on an
615 * external abort on speculative read. It is safer to assume that an
616 * SError might be generated than that it will not be. Hence it has been
617 * classified as FTR_HIGHER_SAFE.
618 */
619 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0),
620 ARM64_FTR_END,
621 };
622
623 static const struct arm64_ftr_bits ftr_id_isar4[] = {
624 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0),
625 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0),
626 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0),
627 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0),
628 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0),
629 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0),
630 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0),
631 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0),
632 ARM64_FTR_END,
633 };
634
635 static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
636 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0),
637 ARM64_FTR_END,
638 };
639
640 static const struct arm64_ftr_bits ftr_id_isar6[] = {
641 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
642 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
643 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
644 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
645 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
646 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
647 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
648 ARM64_FTR_END,
649 };
650
651 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
652 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0),
653 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0),
654 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0),
655 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0),
656 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0),
657 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT, 4, 0),
658 ARM64_FTR_END,
659 };
660
661 static const struct arm64_ftr_bits ftr_id_pfr1[] = {
662 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0),
663 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0),
664 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0),
665 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0),
666 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0),
667 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0),
668 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0),
669 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0),
670 ARM64_FTR_END,
671 };
672
673 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
674 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
675 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
676 ARM64_FTR_END,
677 };
678
679 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
680 /* [31:28] TraceFilt */
681 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
682 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0),
683 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0),
684 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0),
685 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0),
686 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0),
687 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0),
688 ARM64_FTR_END,
689 };
690
691 static const struct arm64_ftr_bits ftr_id_dfr1[] = {
692 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0),
693 ARM64_FTR_END,
694 };
695
696 static const struct arm64_ftr_bits ftr_mpamidr[] = {
697 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
698 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
699 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
700 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
701 ARM64_FTR_END,
702 };
703
704 /*
705 * Common ftr bits for a 32bit register with all hidden, strict
706 * attributes, with 4bit feature fields and a default safe value of
707 * 0. Covers the following 32bit registers:
708 * id_isar[1-3], id_mmfr[1-3]
709 */
710 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
711 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
712 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
713 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
714 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
715 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
716 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
717 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
718 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
719 ARM64_FTR_END,
720 };
721
722 /* Table for a single 32bit feature value */
723 static const struct arm64_ftr_bits ftr_single32[] = {
724 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
725 ARM64_FTR_END,
726 };
727
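/*
 * An empty table: as described at the top of this file, a register whose
 * arm64_ftr_bits[] covers no fields is treated as STRICT RES0 in its
 * entirety.
 */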
728 static const struct arm64_ftr_bits ftr_raz[] = {
729 ARM64_FTR_END,
730 };
731
732 #define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
733 .sys_id = id, \
734 .reg = &(struct arm64_ftr_reg){ \
735 .name = id_str, \
736 .override = (ovr), \
737 .ftr_bits = &((table)[0]), \
738 }}
739
740 #define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
741 __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
742
743 #define ARM64_FTR_REG(id, table) \
744 __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
745
746 struct arm64_ftr_override id_aa64mmfr0_override;
747 struct arm64_ftr_override id_aa64mmfr1_override;
748 struct arm64_ftr_override id_aa64mmfr2_override;
749 struct arm64_ftr_override id_aa64pfr0_override;
750 struct arm64_ftr_override id_aa64pfr1_override;
751 struct arm64_ftr_override id_aa64zfr0_override;
752 struct arm64_ftr_override id_aa64smfr0_override;
753 struct arm64_ftr_override id_aa64isar1_override;
754 struct arm64_ftr_override id_aa64isar2_override;
755
756 struct arm64_ftr_override arm64_sw_feature_override;
757
758 static const struct __ftr_reg_entry {
759 u32 sys_id;
760 struct arm64_ftr_reg *reg;
761 } arm64_ftr_regs[] = {
762
763 /* Op1 = 0, CRn = 0, CRm = 1 */
764 ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
765 ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
766 ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
767 ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
768 ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
769 ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
770 ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
771
772 /* Op1 = 0, CRn = 0, CRm = 2 */
773 ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
774 ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
775 ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
776 ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
777 ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
778 ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
779 ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
780 ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
781
782 /* Op1 = 0, CRn = 0, CRm = 3 */
783 ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0),
784 ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1),
785 ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
786 ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
787 ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
788 ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),
789
790 /* Op1 = 0, CRn = 0, CRm = 4 */
791 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0,
792 &id_aa64pfr0_override),
793 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
794 &id_aa64pfr1_override),
795 ARM64_FTR_REG(SYS_ID_AA64PFR2_EL1, ftr_id_aa64pfr2),
796 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
797 &id_aa64zfr0_override),
798 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
799 &id_aa64smfr0_override),
800 ARM64_FTR_REG(SYS_ID_AA64FPFR0_EL1, ftr_id_aa64fpfr0),
801
802 /* Op1 = 0, CRn = 0, CRm = 5 */
803 ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
804 ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
805
806 /* Op1 = 0, CRn = 0, CRm = 6 */
807 ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
808 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
809 &id_aa64isar1_override),
810 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
811 &id_aa64isar2_override),
812 ARM64_FTR_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3),
813
814 /* Op1 = 0, CRn = 0, CRm = 7 */
815 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
816 &id_aa64mmfr0_override),
817 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
818 &id_aa64mmfr1_override),
819 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
820 &id_aa64mmfr2_override),
821 ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
822 ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
823
824 /* Op1 = 0, CRn = 10, CRm = 4 */
825 ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),
826
827 /* Op1 = 1, CRn = 0, CRm = 0 */
828 ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
829
830 /* Op1 = 3, CRn = 0, CRm = 0 */
831 { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
832 ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
833
834 /* Op1 = 3, CRn = 14, CRm = 0 */
835 ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
836 };
837
838 static int search_cmp_ftr_reg(const void *id, const void *regp)
839 {
840 return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
841 }
842
843 /*
844 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
845 * its sys_reg() encoding. With the arm64_ftr_regs array sorted in
846 * ascending order of sys_id, we use a binary search to find a matching
847 * entry.
848 *
849 * returns - Upon success, the matching ftr_reg entry for id.
850 * - NULL on failure. It is up to the caller to decide
851 * the impact of a failure.
852 */
853 static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
854 {
855 const struct __ftr_reg_entry *ret;
856
857 ret = bsearch((const void *)(unsigned long)sys_id,
858 arm64_ftr_regs,
859 ARRAY_SIZE(arm64_ftr_regs),
860 sizeof(arm64_ftr_regs[0]),
861 search_cmp_ftr_reg);
862 if (ret)
863 return ret->reg;
864 return NULL;
865 }
866
867 /*
868 * get_arm64_ftr_reg - Looks up a feature register entry using
869 * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
870 *
871 * returns - Upon success, the matching ftr_reg entry for id.
872 * - NULL on failure, but with a WARN_ON().
873 */
874 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
875 {
876 struct arm64_ftr_reg *reg;
877
878 reg = get_arm64_ftr_reg_nowarn(sys_id);
879
880 /*
881 * Looking up a non-existent register is an error. Warn
882 * and let the caller handle it.
883 */
884 WARN_ON(!reg);
885 return reg;
886 }
887
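/*
 * Insert @ftr_val into the field described by @ftrp within the register
 * image @reg, leaving all other fields untouched.
 */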
888 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
889 s64 ftr_val)
890 {
891 u64 mask = arm64_ftr_mask(ftrp);
892
893 reg &= ~mask;
894 reg |= (ftr_val << ftrp->shift) & mask;
895 return reg;
896 }
897
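/*
 * Combine the current sanitised value @cur with a new CPU's value @new
 * according to the field type. For example, a FTR_LOWER_SAFE field
 * resolves to min(new, cur): mixing CPUs that report 2 and 1 in such a
 * field yields a system-wide value of 1.
 */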
898 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
899 s64 cur)
900 {
901 s64 ret = 0;
902
903 switch (ftrp->type) {
904 case FTR_EXACT:
905 ret = ftrp->safe_val;
906 break;
907 case FTR_LOWER_SAFE:
908 ret = min(new, cur);
909 break;
910 case FTR_HIGHER_OR_ZERO_SAFE:
911 if (!cur || !new)
912 break;
913 fallthrough;
914 case FTR_HIGHER_SAFE:
915 ret = max(new, cur);
916 break;
917 default:
918 BUG();
919 }
920
921 return ret;
922 }
923
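/*
 * Sanity-check the feature tables at boot: fields within each register
 * must be listed in descending shift order without overlapping, and the
 * registers themselves must be sorted by ascending sys_id so that the
 * binary search in get_arm64_ftr_reg() works.
 */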
924 static void __init sort_ftr_regs(void)
925 {
926 unsigned int i;
927
928 for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
929 const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
930 const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
931 unsigned int j = 0;
932
933 /*
934 * Features here must be sorted in descending order with respect
935 * to their shift values and should not overlap with each other.
936 */
937 for (; ftr_bits->width != 0; ftr_bits++, j++) {
938 unsigned int width = ftr_reg->ftr_bits[j].width;
939 unsigned int shift = ftr_reg->ftr_bits[j].shift;
940 unsigned int prev_shift;
941
942 WARN((shift + width) > 64,
943 "%s has invalid feature at shift %d\n",
944 ftr_reg->name, shift);
945
946 /*
947 * Skip the first feature. There is nothing to
948 * compare against for now.
949 */
950 if (j == 0)
951 continue;
952
953 prev_shift = ftr_reg->ftr_bits[j - 1].shift;
954 WARN((shift + width) > prev_shift,
955 "%s has feature overlap at shift %d\n",
956 ftr_reg->name, shift);
957 }
958
959 /*
960 * Skip the first register. There is nothing to
961 * compare against for now.
962 */
963 if (i == 0)
964 continue;
965 /*
966 * Registers here must be sorted in ascending order with respect
967 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
968 * to work correctly.
969 */
970 BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
971 }
972 }
973
974 /*
975 * Initialise the CPU feature register from the boot CPU's values.
976 * Also initialises the strict_mask for the register.
977 * Any bits that are not covered by an arm64_ftr_bits entry are considered
978 * RES0 for the system-wide value, and must strictly match.
979 */
980 static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
981 {
982 u64 val = 0;
983 u64 strict_mask = ~0x0ULL;
984 u64 user_mask = 0;
985 u64 valid_mask = 0;
986
987 const struct arm64_ftr_bits *ftrp;
988 struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
989
990 if (!reg)
991 return;
992
993 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
994 u64 ftr_mask = arm64_ftr_mask(ftrp);
995 s64 ftr_new = arm64_ftr_value(ftrp, new);
996 s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
997
998 if ((ftr_mask & reg->override->mask) == ftr_mask) {
999 s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
1000 char *str = NULL;
1001
1002 if (ftr_ovr != tmp) {
1003 /* Unsafe, remove the override */
1004 reg->override->mask &= ~ftr_mask;
1005 reg->override->val &= ~ftr_mask;
1006 tmp = ftr_ovr;
1007 str = "ignoring override";
1008 } else if (ftr_new != tmp) {
1009 /* Override was valid */
1010 ftr_new = tmp;
1011 str = "forced";
1012 } else if (ftr_ovr == tmp) {
1013 /* Override was the safe value */
1014 str = "already set";
1015 }
1016
1017 if (str)
1018 pr_warn("%s[%d:%d]: %s to %llx\n",
1019 reg->name,
1020 ftrp->shift + ftrp->width - 1,
1021 ftrp->shift, str,
1022 tmp & (BIT(ftrp->width) - 1));
1023 } else if ((ftr_mask & reg->override->val) == ftr_mask) {
1024 reg->override->val &= ~ftr_mask;
1025 pr_warn("%s[%d:%d]: impossible override, ignored\n",
1026 reg->name,
1027 ftrp->shift + ftrp->width - 1,
1028 ftrp->shift);
1029 }
1030
1031 val = arm64_ftr_set_value(ftrp, val, ftr_new);
1032
1033 valid_mask |= ftr_mask;
1034 if (!ftrp->strict)
1035 strict_mask &= ~ftr_mask;
1036 if (ftrp->visible)
1037 user_mask |= ftr_mask;
1038 else
1039 reg->user_val = arm64_ftr_set_value(ftrp,
1040 reg->user_val,
1041 ftrp->safe_val);
1042 }
1043
1044 val &= valid_mask;
1045
1046 reg->sys_val = val;
1047 reg->strict_mask = strict_mask;
1048 reg->user_mask = user_mask;
1049 }
1050
1051 extern const struct arm64_cpu_capabilities arm64_errata[];
1052 static const struct arm64_cpu_capabilities arm64_features[];
1053
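/*
 * Populate cpucap_ptrs[] so that a capability can be looked up directly by
 * its capability number, warning about (and skipping) out-of-range or
 * duplicate entries.
 */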
1054 static void __init
1055 init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
1056 {
1057 for (; caps->matches; caps++) {
1058 if (WARN(caps->capability >= ARM64_NCAPS,
1059 "Invalid capability %d\n", caps->capability))
1060 continue;
1061 if (WARN(cpucap_ptrs[caps->capability],
1062 "Duplicate entry for capability %d\n",
1063 caps->capability))
1064 continue;
1065 cpucap_ptrs[caps->capability] = caps;
1066 }
1067 }
1068
1069 static void __init init_cpucap_indirect_list(void)
1070 {
1071 init_cpucap_indirect_list_from_array(arm64_features);
1072 init_cpucap_indirect_list_from_array(arm64_errata);
1073 }
1074
1075 static void __init setup_boot_cpu_capabilities(void);
1076
1077 static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
1078 {
1079 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
1080 init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
1081 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
1082 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
1083 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
1084 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
1085 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
1086 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
1087 init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
1088 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
1089 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
1090 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
1091 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
1092 init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
1093 init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
1094 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
1095 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
1096 init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
1097 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
1098 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
1099 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
1100 }
1101
1102 #ifdef CONFIG_ARM64_PSEUDO_NMI
1103 static bool enable_pseudo_nmi;
1104
1105 static int __init early_enable_pseudo_nmi(char *p)
1106 {
1107 return kstrtobool(p, &enable_pseudo_nmi);
1108 }
1109 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1110
1111 static __init void detect_system_supports_pseudo_nmi(void)
1112 {
1113 struct device_node *np;
1114
1115 if (!enable_pseudo_nmi)
1116 return;
1117
1118 /*
1119 * Detect broken MediaTek firmware that doesn't properly save and
1120 * restore GIC priorities.
1121 */
1122 np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1123 if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
1124 pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
1125 enable_pseudo_nmi = false;
1126 }
1127 of_node_put(np);
1128 }
1129 #else /* CONFIG_ARM64_PSEUDO_NMI */
1130 static inline void detect_system_supports_pseudo_nmi(void) { }
1131 #endif
1132
1133 void __init init_cpu_features(struct cpuinfo_arm64 *info)
1134 {
1135 /* Before we start using the tables, make sure they are sorted */
1136 sort_ftr_regs();
1137
1138 init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
1139 init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
1140 init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
1141 init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
1142 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
1143 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
1144 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
1145 init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
1146 init_cpu_ftr_reg(SYS_ID_AA64ISAR3_EL1, info->reg_id_aa64isar3);
1147 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
1148 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
1149 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
1150 init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3);
1151 init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4);
1152 init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
1153 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
1154 init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2);
1155 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
1156 init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);
1157 init_cpu_ftr_reg(SYS_ID_AA64FPFR0_EL1, info->reg_id_aa64fpfr0);
1158
1159 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
1160 init_32bit_cpu_features(&info->aarch32);
1161
1162 if (IS_ENABLED(CONFIG_ARM64_SVE) &&
1163 id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
1164 unsigned long cpacr = cpacr_save_enable_kernel_sve();
1165
1166 vec_init_vq_map(ARM64_VEC_SVE);
1167
1168 cpacr_restore(cpacr);
1169 }
1170
1171 if (IS_ENABLED(CONFIG_ARM64_SME) &&
1172 id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
1173 unsigned long cpacr = cpacr_save_enable_kernel_sme();
1174
1175 vec_init_vq_map(ARM64_VEC_SME);
1176
1177 cpacr_restore(cpacr);
1178 }
1179
1180 if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
1181 info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
1182 init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
1183 }
1184
1185 if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
1186 init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
1187 }
1188
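/*
 * Fold a newly onlined CPU's register value into the sanitised system-wide
 * value, field by field, using arm64_ftr_safe_value() to resolve any
 * mismatch.
 */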
1189 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
1190 {
1191 const struct arm64_ftr_bits *ftrp;
1192
1193 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
1194 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
1195 s64 ftr_new = arm64_ftr_value(ftrp, new);
1196
1197 if (ftr_cur == ftr_new)
1198 continue;
1199 /* Find a safe value */
1200 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
1201 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
1202 }
1203
1204 }
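/*
 * Illustrative sketch (not part of the original source): for a typical
 * unsigned FTR_LOWER_SAFE field, arm64_ftr_safe_value() picks the lower of
 * the two values. So if the boot CPU reported ID_AA64ISAR0_EL1.ATOMIC = 2
 * (LSE atomics) and a secondary CPU reports 0, the sanitised field becomes
 * 0 and the system-wide view loses the feature:
 *
 *	s64 cur = 2, new = 0;
 *	s64 safe = min(cur, new);	// FTR_LOWER_SAFE semantics -> 0
 *
 * FTR_EXACT fields instead fall back to ftrp->safe_val on any mismatch.
 */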
1205
1206 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
1207 {
1208 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
1209
1210 if (!regp)
1211 return 0;
1212
1213 update_cpu_ftr_reg(regp, val);
1214 if ((boot & regp->strict_mask) == (val & regp->strict_mask))
1215 return 0;
1216 pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
1217 regp->name, boot, cpu, val);
1218 return 1;
1219 }
1220
1221 static void relax_cpu_ftr_reg(u32 sys_id, int field)
1222 {
1223 const struct arm64_ftr_bits *ftrp;
1224 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
1225
1226 if (!regp)
1227 return;
1228
1229 for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
1230 if (ftrp->shift == field) {
1231 regp->strict_mask &= ~arm64_ftr_mask(ftrp);
1232 break;
1233 }
1234 }
1235
1236 /* Bogus field? */
1237 WARN_ON(!ftrp->width);
1238 }
1239
1240 static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
1241 struct cpuinfo_arm64 *boot)
1242 {
1243 static bool boot_cpu_32bit_regs_overridden = false;
1244
1245 if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
1246 return;
1247
1248 if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
1249 return;
1250
1251 boot->aarch32 = info->aarch32;
1252 init_32bit_cpu_features(&boot->aarch32);
1253 boot_cpu_32bit_regs_overridden = true;
1254 }
1255
1256 static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
1257 struct cpuinfo_32bit *boot)
1258 {
1259 int taint = 0;
1260 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1261
1262 /*
1263 * If we don't have AArch32 at EL1, then relax the strictness of
1264 * EL1-dependent register fields to avoid spurious sanity check fails.
1265 */
1266 if (!id_aa64pfr0_32bit_el1(pfr0)) {
1267 relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_EL1_SMC_SHIFT);
1268 relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virt_frac_SHIFT);
1269 relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Sec_frac_SHIFT);
1270 relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virtualization_SHIFT);
1271 relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Security_SHIFT);
1272 relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_ProgMod_SHIFT);
1273 }
1274
1275 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
1276 info->reg_id_dfr0, boot->reg_id_dfr0);
1277 taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
1278 info->reg_id_dfr1, boot->reg_id_dfr1);
1279 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
1280 info->reg_id_isar0, boot->reg_id_isar0);
1281 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
1282 info->reg_id_isar1, boot->reg_id_isar1);
1283 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
1284 info->reg_id_isar2, boot->reg_id_isar2);
1285 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
1286 info->reg_id_isar3, boot->reg_id_isar3);
1287 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
1288 info->reg_id_isar4, boot->reg_id_isar4);
1289 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
1290 info->reg_id_isar5, boot->reg_id_isar5);
1291 taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
1292 info->reg_id_isar6, boot->reg_id_isar6);
1293
1294 /*
1295 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
1296 * ACTLR formats could differ across CPUs and therefore would have to
1297 * be trapped for virtualization anyway.
1298 */
1299 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
1300 info->reg_id_mmfr0, boot->reg_id_mmfr0);
1301 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
1302 info->reg_id_mmfr1, boot->reg_id_mmfr1);
1303 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
1304 info->reg_id_mmfr2, boot->reg_id_mmfr2);
1305 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
1306 info->reg_id_mmfr3, boot->reg_id_mmfr3);
1307 taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
1308 info->reg_id_mmfr4, boot->reg_id_mmfr4);
1309 taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
1310 info->reg_id_mmfr5, boot->reg_id_mmfr5);
1311 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
1312 info->reg_id_pfr0, boot->reg_id_pfr0);
1313 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
1314 info->reg_id_pfr1, boot->reg_id_pfr1);
1315 taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
1316 info->reg_id_pfr2, boot->reg_id_pfr2);
1317 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
1318 info->reg_mvfr0, boot->reg_mvfr0);
1319 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
1320 info->reg_mvfr1, boot->reg_mvfr1);
1321 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
1322 info->reg_mvfr2, boot->reg_mvfr2);
1323
1324 return taint;
1325 }
1326
1327 /*
1328 * Update system wide CPU feature registers with the values from a
1329 * non-boot CPU. Also performs SANITY checks to make sure that there
1330 * aren't any insane variations from that of the boot CPU.
1331 */
1332 void update_cpu_features(int cpu,
1333 struct cpuinfo_arm64 *info,
1334 struct cpuinfo_arm64 *boot)
1335 {
1336 int taint = 0;
1337
1338 /*
1339 * The kernel can handle differing I-cache policies, but otherwise
1340 * caches should look identical. Userspace JITs will make use of
1341 * *minLine.
1342 */
1343 taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
1344 info->reg_ctr, boot->reg_ctr);
1345
1346 /*
1347 * Userspace may perform DC ZVA instructions. Mismatched block sizes
1348 * could result in too much or too little memory being zeroed if a
1349 * process is preempted and migrated between CPUs.
1350 */
1351 taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
1352 info->reg_dczid, boot->reg_dczid);
1353
1354 /* If different, timekeeping will be broken (especially with KVM) */
1355 taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
1356 info->reg_cntfrq, boot->reg_cntfrq);
1357
1358 /*
1359 * The kernel uses self-hosted debug features and expects CPUs to
1360 * support identical debug features. We presently need CTX_CMPs, WRPs,
1361 * and BRPs to be identical.
1362 * ID_AA64DFR1 is currently RES0.
1363 */
1364 taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
1365 info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
1366 taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
1367 info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
1368 /*
1369 * Even in big.LITTLE, processors should be identical instruction-set
1370 * wise.
1371 */
1372 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
1373 info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
1374 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
1375 info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
1376 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
1377 info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
1378 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu,
1379 info->reg_id_aa64isar3, boot->reg_id_aa64isar3);
1380
1381 /*
1382 * Differing PARange support is fine as long as all peripherals and
1383 * memory are mapped within the minimum PARange of all CPUs.
1384 * Linux should not care about secure memory.
1385 */
1386 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
1387 info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
1388 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
1389 info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
1390 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
1391 info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
1392 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu,
1393 info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3);
1394
1395 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
1396 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
1397 taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
1398 info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
1399 taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu,
1400 info->reg_id_aa64pfr2, boot->reg_id_aa64pfr2);
1401
1402 taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
1403 info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
1404
1405 taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
1406 info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
1407
1408 taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu,
1409 info->reg_id_aa64fpfr0, boot->reg_id_aa64fpfr0);
1410
1411 /* Probe vector lengths */
1412 if (IS_ENABLED(CONFIG_ARM64_SVE) &&
1413 id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
1414 if (!system_capabilities_finalized()) {
1415 unsigned long cpacr = cpacr_save_enable_kernel_sve();
1416
1417 vec_update_vq_map(ARM64_VEC_SVE);
1418
1419 cpacr_restore(cpacr);
1420 }
1421 }
1422
1423 if (IS_ENABLED(CONFIG_ARM64_SME) &&
1424 id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
1425 unsigned long cpacr = cpacr_save_enable_kernel_sme();
1426
1427 /* Probe vector lengths */
1428 if (!system_capabilities_finalized())
1429 vec_update_vq_map(ARM64_VEC_SME);
1430
1431 cpacr_restore(cpacr);
1432 }
1433
1434 if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
1435 info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
1436 taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
1437 info->reg_mpamidr, boot->reg_mpamidr);
1438 }
1439
1440 /*
1441 * The kernel uses the LDGM/STGM instructions and the number of tags
1442 * they read/write depends on the GMID_EL1.BS field. Check that the
1443 * value is the same on all CPUs.
1444 */
1445 if (IS_ENABLED(CONFIG_ARM64_MTE) &&
1446 id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
1447 taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
1448 info->reg_gmid, boot->reg_gmid);
1449 }
1450
1451 /*
1452 * If we don't have AArch32 at all then skip the checks entirely
1453 * as the register values may be UNKNOWN and we're not going to be
1454 * using them for anything.
1455 *
1456 * This relies on a sanitised view of the AArch64 ID registers
1457 * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
1458 */
1459 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
1460 lazy_init_32bit_cpu_features(info, boot);
1461 taint |= update_32bit_cpu_features(cpu, &info->aarch32,
1462 &boot->aarch32);
1463 }
1464
1465 /*
1466 * Mismatched CPU features are a recipe for disaster. Don't even
1467 * pretend to support them.
1468 */
1469 if (taint) {
1470 pr_warn_once("Unsupported CPU feature variation detected.\n");
1471 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1472 }
1473 }
1474
1475 u64 read_sanitised_ftr_reg(u32 id)
1476 {
1477 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
1478
1479 if (!regp)
1480 return 0;
1481 return regp->sys_val;
1482 }
1483 EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
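/*
 * Example usage (an illustrative sketch, not taken from this file): callers
 * typically read the sanitised value and then extract a single field, e.g.
 * to check the system-wide FEAT_BTI level:
 *
 *	u64 pfr1 = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 *	unsigned int bt = cpuid_feature_extract_unsigned_field(pfr1,
 *						ID_AA64PFR1_EL1_BT_SHIFT);
 *	if (bt >= ID_AA64PFR1_EL1_BT_IMP)
 *		pr_info("BTI implemented by every online CPU\n");
 */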
1484
1485 #define read_sysreg_case(r) \
1486 case r: val = read_sysreg_s(r); break;
1487
1488 /*
1489 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
1490 * Read the system register on the current CPU
1491 */
1492 u64 __read_sysreg_by_encoding(u32 sys_id)
1493 {
1494 struct arm64_ftr_reg *regp;
1495 u64 val;
1496
1497 switch (sys_id) {
1498 read_sysreg_case(SYS_ID_PFR0_EL1);
1499 read_sysreg_case(SYS_ID_PFR1_EL1);
1500 read_sysreg_case(SYS_ID_PFR2_EL1);
1501 read_sysreg_case(SYS_ID_DFR0_EL1);
1502 read_sysreg_case(SYS_ID_DFR1_EL1);
1503 read_sysreg_case(SYS_ID_MMFR0_EL1);
1504 read_sysreg_case(SYS_ID_MMFR1_EL1);
1505 read_sysreg_case(SYS_ID_MMFR2_EL1);
1506 read_sysreg_case(SYS_ID_MMFR3_EL1);
1507 read_sysreg_case(SYS_ID_MMFR4_EL1);
1508 read_sysreg_case(SYS_ID_MMFR5_EL1);
1509 read_sysreg_case(SYS_ID_ISAR0_EL1);
1510 read_sysreg_case(SYS_ID_ISAR1_EL1);
1511 read_sysreg_case(SYS_ID_ISAR2_EL1);
1512 read_sysreg_case(SYS_ID_ISAR3_EL1);
1513 read_sysreg_case(SYS_ID_ISAR4_EL1);
1514 read_sysreg_case(SYS_ID_ISAR5_EL1);
1515 read_sysreg_case(SYS_ID_ISAR6_EL1);
1516 read_sysreg_case(SYS_MVFR0_EL1);
1517 read_sysreg_case(SYS_MVFR1_EL1);
1518 read_sysreg_case(SYS_MVFR2_EL1);
1519
1520 read_sysreg_case(SYS_ID_AA64PFR0_EL1);
1521 read_sysreg_case(SYS_ID_AA64PFR1_EL1);
1522 read_sysreg_case(SYS_ID_AA64PFR2_EL1);
1523 read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
1524 read_sysreg_case(SYS_ID_AA64SMFR0_EL1);
1525 read_sysreg_case(SYS_ID_AA64FPFR0_EL1);
1526 read_sysreg_case(SYS_ID_AA64DFR0_EL1);
1527 read_sysreg_case(SYS_ID_AA64DFR1_EL1);
1528 read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
1529 read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
1530 read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
1531 read_sysreg_case(SYS_ID_AA64MMFR3_EL1);
1532 read_sysreg_case(SYS_ID_AA64MMFR4_EL1);
1533 read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
1534 read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
1535 read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
1536 read_sysreg_case(SYS_ID_AA64ISAR3_EL1);
1537
1538 read_sysreg_case(SYS_CNTFRQ_EL0);
1539 read_sysreg_case(SYS_CTR_EL0);
1540 read_sysreg_case(SYS_DCZID_EL0);
1541
1542 default:
1543 BUG();
1544 return 0;
1545 }
1546
1547 regp = get_arm64_ftr_reg(sys_id);
1548 if (regp) {
1549 val &= ~regp->override->mask;
1550 val |= (regp->override->val & regp->override->mask);
1551 }
1552
1553 return val;
1554 }
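/*
 * A worked example of the override step above (illustrative only): with an
 * ID register override such as "arm64.nosve" on the command line, the
 * override mask covers the ID_AA64PFR0_EL1.SVE field and the override value
 * for that field is 0, so the raw value read above is reported as
 *
 *	val = (val & ~override->mask) | (override->val & override->mask);
 *
 * i.e. with the SVE field forced to 0, hiding SVE from the cpufeature code
 * even though the hardware implements it.
 */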
1555
1556 #include <linux/irqchip/arm-gic-v3.h>
1557
1558 static bool
1559 has_always(const struct arm64_cpu_capabilities *entry, int scope)
1560 {
1561 return true;
1562 }
1563
1564 static bool
1565 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
1566 {
1567 int val, min, max;
1568 u64 tmp;
1569
1570 val = cpuid_feature_extract_field_width(reg, entry->field_pos,
1571 entry->field_width,
1572 entry->sign);
1573
1574 tmp = entry->min_field_value;
1575 tmp <<= entry->field_pos;
1576
1577 min = cpuid_feature_extract_field_width(tmp, entry->field_pos,
1578 entry->field_width,
1579 entry->sign);
1580
1581 tmp = entry->max_field_value;
1582 tmp <<= entry->field_pos;
1583
1584 max = cpuid_feature_extract_field_width(tmp, entry->field_pos,
1585 entry->field_width,
1586 entry->sign);
1587
1588 return val >= min && val <= max;
1589 }
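/*
 * Worked example (illustrative only): with field_pos = 20, field_width = 4,
 * sign = FTR_UNSIGNED, min_field_value = 2 and max_field_value = 15, a
 * register value of 0x00300000 extracts to val = 3, min = 2 and max = 15,
 * so the capability matches; a value of 0 in that field would not. The
 * sign-aware extraction matters for signed fields, where 0xf is -1 and
 * conventionally means "not implemented".
 */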
1590
1591 static u64
1592 read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope)
1593 {
1594 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1595 if (scope == SCOPE_SYSTEM)
1596 return read_sanitised_ftr_reg(entry->sys_reg);
1597 else
1598 return __read_sysreg_by_encoding(entry->sys_reg);
1599 }
1600
1601 static bool
1602 has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
1603 {
1604 int mask;
1605 struct arm64_ftr_reg *regp;
1606 u64 val = read_scoped_sysreg(entry, scope);
1607
1608 regp = get_arm64_ftr_reg(entry->sys_reg);
1609 if (!regp)
1610 return false;
1611
1612 mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask,
1613 entry->field_pos,
1614 entry->field_width);
1615 if (!mask)
1616 return false;
1617
1618 return feature_matches(val, entry);
1619 }
1620
1621 static bool
1622 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
1623 {
1624 u64 val = read_scoped_sysreg(entry, scope);
1625 return feature_matches(val, entry);
1626 }
1627
1628 const struct cpumask *system_32bit_el0_cpumask(void)
1629 {
1630 if (!system_supports_32bit_el0())
1631 return cpu_none_mask;
1632
1633 if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
1634 return cpu_32bit_el0_mask;
1635
1636 return cpu_possible_mask;
1637 }
1638 EXPORT_SYMBOL_GPL(system_32bit_el0_cpumask);
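/*
 * Example usage (a sketch, not from this file): code that needs to place a
 * 32-bit task can test a candidate CPU against the returned mask:
 *
 *	const struct cpumask *mask = system_32bit_el0_cpumask();
 *
 *	if (!cpumask_test_cpu(cpu, mask))
 *		return -EINVAL;		// CPU cannot run AArch32 at EL0
 */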
1639
1640 static int __init parse_32bit_el0_param(char *str)
1641 {
1642 allow_mismatched_32bit_el0 = true;
1643 return 0;
1644 }
1645 early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
1646
1647 static ssize_t aarch32_el0_show(struct device *dev,
1648 struct device_attribute *attr, char *buf)
1649 {
1650 const struct cpumask *mask = system_32bit_el0_cpumask();
1651
1652 return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
1653 }
1654 static const DEVICE_ATTR_RO(aarch32_el0);
1655
1656 static int __init aarch32_el0_sysfs_init(void)
1657 {
1658 struct device *dev_root;
1659 int ret = 0;
1660
1661 if (!allow_mismatched_32bit_el0)
1662 return 0;
1663
1664 dev_root = bus_get_dev_root(&cpu_subsys);
1665 if (dev_root) {
1666 ret = device_create_file(dev_root, &dev_attr_aarch32_el0);
1667 put_device(dev_root);
1668 }
1669 return ret;
1670 }
1671 device_initcall(aarch32_el0_sysfs_init);
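/*
 * Usage note (path assumed from cpu_subsys, not stated in this file): when
 * the kernel is booted with "allow_mismatched_32bit_el0", the attribute
 * created above is expected to appear as /sys/devices/system/cpu/aarch32_el0
 * and reports the mask in CPU-list form, e.g. "0-3".
 */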
1672
1673 static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
1674 {
1675 if (!has_cpuid_feature(entry, scope))
1676 return allow_mismatched_32bit_el0;
1677
1678 if (scope == SCOPE_SYSTEM)
1679 pr_info("detected: 32-bit EL0 Support\n");
1680
1681 return true;
1682 }
1683
1684 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
1685 {
1686 bool has_sre;
1687
1688 if (!has_cpuid_feature(entry, scope))
1689 return false;
1690
1691 has_sre = gic_enable_sre();
1692 if (!has_sre)
1693 pr_warn_once("%s present but disabled by higher exception level\n",
1694 entry->desc);
1695
1696 return has_sre;
1697 }
1698
1699 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
1700 int scope)
1701 {
1702 u64 ctr;
1703
1704 if (scope == SCOPE_SYSTEM)
1705 ctr = arm64_ftr_reg_ctrel0.sys_val;
1706 else
1707 ctr = read_cpuid_effective_cachetype();
1708
1709 return ctr & BIT(CTR_EL0_IDC_SHIFT);
1710 }
1711
1712 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
1713 {
1714 /*
1715 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
1716 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
1717 * to the CTR_EL0 on this CPU and emulate it with the real/safe
1718 * value.
1719 */
1720 if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT)))
1721 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
1722 }
1723
1724 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
1725 int scope)
1726 {
1727 u64 ctr;
1728
1729 if (scope == SCOPE_SYSTEM)
1730 ctr = arm64_ftr_reg_ctrel0.sys_val;
1731 else
1732 ctr = read_cpuid_cachetype();
1733
1734 return ctr & BIT(CTR_EL0_DIC_SHIFT);
1735 }
1736
1737 static bool __maybe_unused
1738 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
1739 {
1740 /*
1741 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
1742 * may share TLB entries with a CPU stuck in the crashed
1743 * kernel.
1744 */
1745 if (is_kdump_kernel())
1746 return false;
1747
1748 if (cpus_have_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
1749 return false;
1750
1751 return has_cpuid_feature(entry, scope);
1752 }
1753
1754 static bool __meltdown_safe = true;
1755 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
1756
1757 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
1758 int scope)
1759 {
1760 /* List of CPUs that are not vulnerable and don't need KPTI */
1761 static const struct midr_range kpti_safe_list[] = {
1762 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1763 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1764 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
1765 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
1766 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
1767 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1768 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1769 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1770 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1771 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
1772 MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
1773 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
1774 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
1775 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
1776 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
1777 { /* sentinel */ }
1778 };
1779 char const *str = "kpti command line option";
1780 bool meltdown_safe;
1781
1782 meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
1783
1784 /* Defer to CPU feature registers */
1785 if (has_cpuid_feature(entry, scope))
1786 meltdown_safe = true;
1787
1788 if (!meltdown_safe)
1789 __meltdown_safe = false;
1790
1791 /*
1792 * For reasons that aren't entirely clear, enabling KPTI on Cavium
1793 * ThunderX leads to apparent I-cache corruption of kernel text, which
1794 * ends as well as you might imagine. Don't even try. We cannot rely
1795 * on the cpus_have_*cap() helpers here to detect the CPU erratum
1796 * because cpucap detection order may change. However, since we know
1797 * affected CPUs are always in a homogeneous configuration, it is
1798 * safe to rely on this_cpu_has_cap() here.
1799 */
1800 if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
1801 str = "ARM64_WORKAROUND_CAVIUM_27456";
1802 __kpti_forced = -1;
1803 }
1804
1805 /* Useful for KASLR robustness */
1806 if (kaslr_enabled() && kaslr_requires_kpti()) {
1807 if (!__kpti_forced) {
1808 str = "KASLR";
1809 __kpti_forced = 1;
1810 }
1811 }
1812
1813 if (cpu_mitigations_off() && !__kpti_forced) {
1814 str = "mitigations=off";
1815 __kpti_forced = -1;
1816 }
1817
1818 if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
1819 pr_info_once("kernel page table isolation disabled by kernel configuration\n");
1820 return false;
1821 }
1822
1823 /* Forced? */
1824 if (__kpti_forced) {
1825 pr_info_once("kernel page table isolation forced %s by %s\n",
1826 __kpti_forced > 0 ? "ON" : "OFF", str);
1827 return __kpti_forced > 0;
1828 }
1829
1830 return !meltdown_safe;
1831 }
1832
1833 static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
1834 {
1835 /*
1836 * Although the Apple M2 family appears to support NV1, the
1837 * PTW barfs on the nVHE EL2 S1 page table format. Pretend
1838 * that it doesn't support NV1 at all.
1839 */
1840 static const struct midr_range nv1_ni_list[] = {
1841 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
1842 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
1843 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
1844 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
1845 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
1846 MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
1847 {}
1848 };
1849
1850 return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
1851 !(has_cpuid_feature(entry, scope) ||
1852 is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
1853 }
1854
1855 #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
1856 static bool has_lpa2_at_stage1(u64 mmfr0)
1857 {
1858 unsigned int tgran;
1859
1860 tgran = cpuid_feature_extract_unsigned_field(mmfr0,
1861 ID_AA64MMFR0_EL1_TGRAN_SHIFT);
1862 return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
1863 }
1864
1865 static bool has_lpa2_at_stage2(u64 mmfr0)
1866 {
1867 unsigned int tgran;
1868
1869 tgran = cpuid_feature_extract_unsigned_field(mmfr0,
1870 ID_AA64MMFR0_EL1_TGRAN_2_SHIFT);
1871 return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2;
1872 }
1873
1874 static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
1875 {
1876 u64 mmfr0;
1877
1878 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
1879 return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0);
1880 }
1881 #else
1882 static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
1883 {
1884 return false;
1885 }
1886 #endif
1887
1888 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1889 #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
1890
1891 extern
1892 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
1893 phys_addr_t size, pgprot_t prot,
1894 phys_addr_t (*pgtable_alloc)(int), int flags);
1895
1896 static phys_addr_t __initdata kpti_ng_temp_alloc;
1897
1898 static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
1899 {
1900 kpti_ng_temp_alloc -= PAGE_SIZE;
1901 return kpti_ng_temp_alloc;
1902 }
1903
1904 static int __init __kpti_install_ng_mappings(void *__unused)
1905 {
1906 typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
1907 extern kpti_remap_fn idmap_kpti_install_ng_mappings;
1908 kpti_remap_fn *remap_fn;
1909
1910 int cpu = smp_processor_id();
1911 int levels = CONFIG_PGTABLE_LEVELS;
1912 int order = order_base_2(levels);
1913 u64 kpti_ng_temp_pgd_pa = 0;
1914 pgd_t *kpti_ng_temp_pgd;
1915 u64 alloc = 0;
1916
1917 if (levels == 5 && !pgtable_l5_enabled())
1918 levels = 4;
1919 else if (levels == 4 && !pgtable_l4_enabled())
1920 levels = 3;
1921
1922 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
1923
1924 if (!cpu) {
1925 alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
1926 kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
1927 kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
1928
1929 //
1930 // Create a minimal page table hierarchy that permits us to map
1931 // the swapper page tables temporarily as we traverse them.
1932 //
1933 // The physical pages are laid out as follows:
1934 //
1935 // +--------+-/-------+-/------ +-/------ +-\\\--------+
1936 // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] :
1937 // +--------+-\-------+-\------ +-\------ +-///--------+
1938 // ^
1939 // The first page is mapped into this hierarchy at a PMD_SHIFT
1940 // aligned virtual address, so that we can manipulate the PTE
1941 // level entries while the mapping is active. The first entry
1942 // covers the PTE[] page itself, the remaining entries are free
1943 // to be used as an ad-hoc fixmap.
1944 //
1945 create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
1946 KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
1947 kpti_ng_pgd_alloc, 0);
1948 }
1949
1950 cpu_install_idmap();
1951 remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
1952 cpu_uninstall_idmap();
1953
1954 if (!cpu) {
1955 free_pages(alloc, order);
1956 arm64_use_ng_mappings = true;
1957 }
1958
1959 return 0;
1960 }
1961
1962 static void __init kpti_install_ng_mappings(void)
1963 {
1964 /* Check whether KPTI is going to be used */
1965 if (!arm64_kernel_unmapped_at_el0())
1966 return;
1967
1968 /*
1969 * We don't need to rewrite the page-tables if either we've done
1970 * it already or we have KASLR enabled and therefore have not
1971 * created any global mappings at all.
1972 */
1973 if (arm64_use_ng_mappings)
1974 return;
1975
1976 stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
1977 }
1978
1979 #else
1980 static inline void kpti_install_ng_mappings(void)
1981 {
1982 }
1983 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1984
1985 static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap)
1986 {
1987 if (__this_cpu_read(this_cpu_vector) == vectors) {
1988 const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
1989
1990 __this_cpu_write(this_cpu_vector, v);
1991 }
1992
1993 }
1994
1995 static int __init parse_kpti(char *str)
1996 {
1997 bool enabled;
1998 int ret = kstrtobool(str, &enabled);
1999
2000 if (ret)
2001 return ret;
2002
2003 __kpti_forced = enabled ? 1 : -1;
2004 return 0;
2005 }
2006 early_param("kpti", parse_kpti);
2007
2008 #ifdef CONFIG_ARM64_HW_AFDBM
2009 static struct cpumask dbm_cpus __read_mostly;
2010
2011 static inline void __cpu_enable_hw_dbm(void)
2012 {
2013 u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
2014
2015 write_sysreg(tcr, tcr_el1);
2016 isb();
2017 local_flush_tlb_all();
2018 }
2019
2020 static bool cpu_has_broken_dbm(void)
2021 {
2022 /* List of CPUs which have broken DBM support. */
2023 static const struct midr_range cpus[] = {
2024 #ifdef CONFIG_ARM64_ERRATUM_1024718
2025 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
2026 /* Kryo4xx Silver (rdpe => r1p0) */
2027 MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
2028 #endif
2029 #ifdef CONFIG_ARM64_ERRATUM_2051678
2030 MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
2031 #endif
2032 {},
2033 };
2034
2035 return is_midr_in_range_list(read_cpuid_id(), cpus);
2036 }
2037
2038 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
2039 {
2040 return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
2041 !cpu_has_broken_dbm();
2042 }
2043
2044 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
2045 {
2046 if (cpu_can_use_dbm(cap)) {
2047 __cpu_enable_hw_dbm();
2048 cpumask_set_cpu(smp_processor_id(), &dbm_cpus);
2049 }
2050 }
2051
2052 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
2053 int __unused)
2054 {
2055 /*
2056 * DBM is a non-conflicting feature, i.e. the kernel can safely
2057 * run a mix of CPUs with and without the feature. So, we
2058 * unconditionally enable the capability to allow any late CPU
2059 * to use the feature. We only enable the control bits on the
2060 * CPU, if it is supported.
2061 */
2062
2063 return true;
2064 }
2065
2066 #endif
2067
2068 #ifdef CONFIG_ARM64_AMU_EXTN
2069
2070 /*
2071 * The "amu_cpus" cpumask only signals that the CPU implementation for the
2072 * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
2073 * information regarding all the events that it supports. When a CPU bit is
2074 * set in the cpumask, the user of this feature can only rely on the presence
2075 * of the 4 fixed counters for that CPU. But this does not guarantee that the
2076 * counters are enabled or access to these counters is enabled by code
2077 * executed at higher exception levels (firmware).
2078 */
2079 static struct cpumask amu_cpus __read_mostly;
2080
2081 bool cpu_has_amu_feat(int cpu)
2082 {
2083 return cpumask_test_cpu(cpu, &amu_cpus);
2084 }
2085
2086 int get_cpu_with_amu_feat(void)
2087 {
2088 return cpumask_any(&amu_cpus);
2089 }
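/*
 * Illustrative usage (not part of the original source): a caller running on
 * a CPU flagged here can sample the fixed counters locally, e.g. the core
 * cycle counter:
 *
 *	if (cpu_has_amu_feat(smp_processor_id()))
 *		cycles = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
 *
 * The counters are only accessible from the CPU that owns them; remote CPUs
 * cannot be read this way.
 */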
2090
2091 static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
2092 {
2093 if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
2094 cpumask_set_cpu(smp_processor_id(), &amu_cpus);
2095
2096 /* 0 reference values signal broken/disabled counters */
2097 if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
2098 update_freq_counters_refs();
2099 }
2100 }
2101
2102 static bool has_amu(const struct arm64_cpu_capabilities *cap,
2103 int __unused)
2104 {
2105 /*
2106 * The AMU extension is a non-conflicting feature: the kernel can
2107 * safely run a mix of CPUs with and without support for the
2108 * activity monitors extension. Therefore, unconditionally enable
2109 * the capability to allow any late CPU to use the feature.
2110 *
2111 * With this feature unconditionally enabled, the cpu_enable
2112 * function will be called for all CPUs that match the criteria,
2113 * including secondary and hotplugged, marking this feature as
2114 * present on that respective CPU. The enable function will also
2115 * print a detection message.
2116 */
2117
2118 return true;
2119 }
2120 #else
2121 int get_cpu_with_amu_feat(void)
2122 {
2123 return nr_cpu_ids;
2124 }
2125 #endif
2126
2127 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
2128 {
2129 return is_kernel_in_hyp_mode();
2130 }
2131
2132 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
2133 {
2134 /*
2135 * Copy register values that aren't redirected by hardware.
2136 *
2137 * Before code patching, we only set tpidr_el1, so all CPUs need to copy
2138 * this value to tpidr_el2 before we patch the code. Once we've done
2139 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
2140 * do anything here.
2141 */
2142 if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
2143 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
2144 }
2145
2146 static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
2147 int scope)
2148 {
2149 if (kvm_get_mode() != KVM_MODE_NV)
2150 return false;
2151
2152 if (!has_cpuid_feature(cap, scope)) {
2153 pr_warn("unavailable: %s\n", cap->desc);
2154 return false;
2155 }
2156
2157 return true;
2158 }
2159
2160 static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
2161 int __unused)
2162 {
2163 return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
2164 }
2165
2166 #ifdef CONFIG_ARM64_PAN
2167 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
2168 {
2169 /*
2170 * We modify PSTATE. This won't work from irq context as the PSTATE
2171 * is discarded once we return from the exception.
2172 */
2173 WARN_ON_ONCE(in_interrupt());
2174
2175 sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
2176 set_pstate_pan(1);
2177 }
2178 #endif /* CONFIG_ARM64_PAN */
2179
2180 #ifdef CONFIG_ARM64_RAS_EXTN
2181 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
2182 {
2183 /* Firmware may have left a deferred SError in this register. */
2184 write_sysreg_s(0, SYS_DISR_EL1);
2185 }
2186 #endif /* CONFIG_ARM64_RAS_EXTN */
2187
2188 #ifdef CONFIG_ARM64_PTR_AUTH
2189 static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
2190 {
2191 int boot_val, sec_val;
2192
2193 /* We don't expect to be called with SCOPE_SYSTEM */
2194 WARN_ON(scope == SCOPE_SYSTEM);
2195 /*
2196 * The ptr-auth feature levels are not compatible with lower
2197 * levels, so the secondary CPUs must implement the same ptr-auth
2198 * feature level as the boot CPU. The boot CPU's level is fetched
2199 * from the sanitised register, whereas the secondary CPUs are read
2200 * directly.
2201 * The sanitised feature state is guaranteed to match that of the
2202 * boot CPU as a mismatched secondary CPU is parked before it gets
2203 * a chance to update the state, with the capability.
2204 */
2205 boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
2206 entry->field_pos, entry->sign);
2207 if (scope & SCOPE_BOOT_CPU)
2208 return boot_val >= entry->min_field_value;
2209 /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
2210 sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
2211 entry->field_pos, entry->sign);
2212 return (sec_val >= entry->min_field_value) && (sec_val == boot_val);
2213 }
2214
2215 static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
2216 int scope)
2217 {
2218 bool api = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
2219 bool apa = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
2220 bool apa3 = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);
2221
2222 return apa || apa3 || api;
2223 }
2224
2225 static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
2226 int __unused)
2227 {
2228 bool gpi = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
2229 bool gpa = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5);
2230 bool gpa3 = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3);
2231
2232 return gpa || gpa3 || gpi;
2233 }
2234 #endif /* CONFIG_ARM64_PTR_AUTH */
2235
2236 #ifdef CONFIG_ARM64_E0PD
2237 static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
2238 {
2239 if (this_cpu_has_cap(ARM64_HAS_E0PD))
2240 sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
2241 }
2242 #endif /* CONFIG_ARM64_E0PD */
2243
2244 #ifdef CONFIG_ARM64_PSEUDO_NMI
2245 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
2246 int scope)
2247 {
2248 /*
2249 * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU
2250 * feature, so will be detected earlier.
2251 */
2252 BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS);
2253 if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS))
2254 return false;
2255
2256 return enable_pseudo_nmi;
2257 }
2258
2259 static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry,
2260 int scope)
2261 {
2262 /*
2263 * If we're not using priority masking then we won't be poking PMR_EL1,
2264 * and there's no need to relax synchronization of writes to it, and
2265 * ICC_CTLR_EL1 might not be accessible and we must avoid reads from
2266 * it.
2267 *
2268 * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU
2269 * feature, so will be detected earlier.
2270 */
2271 BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING);
2272 if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING))
2273 return false;
2274
2275 /*
2276 * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a
2277 * hint for interrupt distribution, a DSB is not necessary when
2278 * unmasking IRQs via PMR, and we can relax the barrier to a NOP.
2279 *
2280 * Linux itself doesn't use 1:N distribution, so has no need to
2281 * set PMHE. The only reason to have it set is if EL3 requires it
2282 * (and we can't change it).
2283 */
2284 return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0;
2285 }
2286 #endif
2287
2288 #ifdef CONFIG_ARM64_BTI
2289 static void bti_enable(const struct arm64_cpu_capabilities *__unused)
2290 {
2291 /*
2292 * Use of X16/X17 for tail-calls and trampolines that jump to
2293 * function entry points using BR is a requirement for
2294 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
2295 * So, be strict and forbid other BRs using other registers to
2296 * jump onto a PACIxSP instruction:
2297 */
2298 sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
2299 isb();
2300 }
2301 #endif /* CONFIG_ARM64_BTI */
2302
2303 #ifdef CONFIG_ARM64_MTE
2304 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
2305 {
2306 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
2307
2308 mte_cpu_setup();
2309
2310 /*
2311 * Clear the tags in the zero page. This needs to be done via the
2312 * linear map which has the Tagged attribute.
2313 */
2314 if (try_page_mte_tagging(ZERO_PAGE(0))) {
2315 mte_clear_page_tags(lm_alias(empty_zero_page));
2316 set_page_mte_tagged(ZERO_PAGE(0));
2317 }
2318
2319 kasan_init_hw_tags_cpu();
2320 }
2321 #endif /* CONFIG_ARM64_MTE */
2322
2323 static void user_feature_fixup(void)
2324 {
2325 if (cpus_have_cap(ARM64_WORKAROUND_2658417)) {
2326 struct arm64_ftr_reg *regp;
2327
2328 regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2329 if (regp)
2330 regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
2331 }
2332
2333 if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
2334 struct arm64_ftr_reg *regp;
2335
2336 regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
2337 if (regp)
2338 regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
2339 }
2340 }
2341
2342 static void elf_hwcap_fixup(void)
2343 {
2344 #ifdef CONFIG_COMPAT
2345 if (cpus_have_cap(ARM64_WORKAROUND_1742098))
2346 compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
2347 #endif /* CONFIG_COMPAT */
2348 }
2349
2350 #ifdef CONFIG_KVM
2351 static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
2352 {
2353 return kvm_get_mode() == KVM_MODE_PROTECTED;
2354 }
2355 #endif /* CONFIG_KVM */
2356
2357 static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused)
2358 {
2359 sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP);
2360 }
2361
2362 static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused)
2363 {
2364 set_pstate_dit(1);
2365 }
2366
2367 static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
2368 {
2369 sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn);
2370 }
2371
2372 #ifdef CONFIG_ARM64_POE
2373 static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused)
2374 {
2375 sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE);
2376 sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE);
2377 }
2378 #endif
2379
2380 /* Internal helper functions to match cpu capability type */
2381 static bool
2382 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
2383 {
2384 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
2385 }
2386
2387 static bool
2388 cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
2389 {
2390 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
2391 }
2392
2393 static bool
2394 cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
2395 {
2396 return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
2397 }
2398
2399 static bool
2400 test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
2401 {
2402 if (!has_cpuid_feature(entry, scope))
2403 return false;
2404
2405 /* Check firmware actually enabled MPAM on this cpu. */
2406 return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN);
2407 }
2408
2409 static void
2410 cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
2411 {
2412 /*
2413 * Access by the kernel (at EL1) should use the reserved PARTID
2414 * which is configured unrestricted. This avoids priority-inversion
2415 * where latency sensitive tasks have to wait for a task that has
2416 * been throttled to release the lock.
2417 */
2418 write_sysreg_s(0, SYS_MPAM1_EL1);
2419 }
2420
2421 static bool
2422 test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope)
2423 {
2424 u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
2425
2426 return idr & MPAMIDR_EL1_HAS_HCR;
2427 }
2428
2429 static const struct arm64_cpu_capabilities arm64_features[] = {
2430 {
2431 .capability = ARM64_ALWAYS_BOOT,
2432 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2433 .matches = has_always,
2434 },
2435 {
2436 .capability = ARM64_ALWAYS_SYSTEM,
2437 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2438 .matches = has_always,
2439 },
2440 {
2441 .desc = "GIC system register CPU interface",
2442 .capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
2443 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2444 .matches = has_useable_gicv3_cpuif,
2445 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP)
2446 },
2447 {
2448 .desc = "Enhanced Counter Virtualization",
2449 .capability = ARM64_HAS_ECV,
2450 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2451 .matches = has_cpuid_feature,
2452 ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP)
2453 },
2454 {
2455 .desc = "Enhanced Counter Virtualization (CNTPOFF)",
2456 .capability = ARM64_HAS_ECV_CNTPOFF,
2457 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2458 .matches = has_cpuid_feature,
2459 ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF)
2460 },
2461 #ifdef CONFIG_ARM64_PAN
2462 {
2463 .desc = "Privileged Access Never",
2464 .capability = ARM64_HAS_PAN,
2465 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2466 .matches = has_cpuid_feature,
2467 .cpu_enable = cpu_enable_pan,
2468 ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP)
2469 },
2470 #endif /* CONFIG_ARM64_PAN */
2471 #ifdef CONFIG_ARM64_EPAN
2472 {
2473 .desc = "Enhanced Privileged Access Never",
2474 .capability = ARM64_HAS_EPAN,
2475 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2476 .matches = has_cpuid_feature,
2477 ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3)
2478 },
2479 #endif /* CONFIG_ARM64_EPAN */
2480 #ifdef CONFIG_ARM64_LSE_ATOMICS
2481 {
2482 .desc = "LSE atomic instructions",
2483 .capability = ARM64_HAS_LSE_ATOMICS,
2484 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2485 .matches = has_cpuid_feature,
2486 ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
2487 },
2488 #endif /* CONFIG_ARM64_LSE_ATOMICS */
2489 {
2490 .desc = "Virtualization Host Extensions",
2491 .capability = ARM64_HAS_VIRT_HOST_EXTN,
2492 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2493 .matches = runs_at_el2,
2494 .cpu_enable = cpu_copy_el2regs,
2495 },
2496 {
2497 .desc = "Nested Virtualization Support",
2498 .capability = ARM64_HAS_NESTED_VIRT,
2499 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2500 .matches = has_nested_virt_support,
2501 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
2502 },
2503 {
2504 .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
2505 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2506 .matches = has_32bit_el0,
2507 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL0, AARCH32)
2508 },
2509 #ifdef CONFIG_KVM
2510 {
2511 .desc = "32-bit EL1 Support",
2512 .capability = ARM64_HAS_32BIT_EL1,
2513 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2514 .matches = has_cpuid_feature,
2515 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL1, AARCH32)
2516 },
2517 {
2518 .desc = "Protected KVM",
2519 .capability = ARM64_KVM_PROTECTED_MODE,
2520 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2521 .matches = is_kvm_protected_mode,
2522 },
2523 {
2524 .desc = "HCRX_EL2 register",
2525 .capability = ARM64_HAS_HCX,
2526 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2527 .matches = has_cpuid_feature,
2528 ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP)
2529 },
2530 #endif
2531 {
2532 .desc = "Kernel page table isolation (KPTI)",
2533 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
2534 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
2535 .cpu_enable = cpu_enable_kpti,
2536 .matches = unmap_kernel_at_el0,
2537 /*
2538 * The ID feature fields below are used to indicate that
2539 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
2540 * more details.
2541 */
2542 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP)
2543 },
2544 {
2545 .capability = ARM64_HAS_FPSIMD,
2546 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2547 .matches = has_cpuid_feature,
2548 .cpu_enable = cpu_enable_fpsimd,
2549 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, FP, IMP)
2550 },
2551 #ifdef CONFIG_ARM64_PMEM
2552 {
2553 .desc = "Data cache clean to Point of Persistence",
2554 .capability = ARM64_HAS_DCPOP,
2555 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2556 .matches = has_cpuid_feature,
2557 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, IMP)
2558 },
2559 {
2560 .desc = "Data cache clean to Point of Deep Persistence",
2561 .capability = ARM64_HAS_DCPODP,
2562 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2563 .matches = has_cpuid_feature,
2564 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, DPB2)
2565 },
2566 #endif
2567 #ifdef CONFIG_ARM64_SVE
2568 {
2569 .desc = "Scalable Vector Extension",
2570 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2571 .capability = ARM64_SVE,
2572 .cpu_enable = cpu_enable_sve,
2573 .matches = has_cpuid_feature,
2574 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
2575 },
2576 #endif /* CONFIG_ARM64_SVE */
2577 #ifdef CONFIG_ARM64_RAS_EXTN
2578 {
2579 .desc = "RAS Extension Support",
2580 .capability = ARM64_HAS_RAS_EXTN,
2581 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2582 .matches = has_cpuid_feature,
2583 .cpu_enable = cpu_clear_disr,
2584 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
2585 },
2586 #endif /* CONFIG_ARM64_RAS_EXTN */
2587 #ifdef CONFIG_ARM64_AMU_EXTN
2588 {
2589 .desc = "Activity Monitors Unit (AMU)",
2590 .capability = ARM64_HAS_AMU_EXTN,
2591 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2592 .matches = has_amu,
2593 .cpu_enable = cpu_amu_enable,
2594 .cpus = &amu_cpus,
2595 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP)
2596 },
2597 #endif /* CONFIG_ARM64_AMU_EXTN */
2598 {
2599 .desc = "Data cache clean to the PoU not required for I/D coherence",
2600 .capability = ARM64_HAS_CACHE_IDC,
2601 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2602 .matches = has_cache_idc,
2603 .cpu_enable = cpu_emulate_effective_ctr,
2604 },
2605 {
2606 .desc = "Instruction cache invalidation not required for I/D coherence",
2607 .capability = ARM64_HAS_CACHE_DIC,
2608 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2609 .matches = has_cache_dic,
2610 },
2611 {
2612 .desc = "Stage-2 Force Write-Back",
2613 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2614 .capability = ARM64_HAS_STAGE2_FWB,
2615 .matches = has_cpuid_feature,
2616 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP)
2617 },
2618 {
2619 .desc = "ARMv8.4 Translation Table Level",
2620 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2621 .capability = ARM64_HAS_ARMv8_4_TTL,
2622 .matches = has_cpuid_feature,
2623 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP)
2624 },
2625 {
2626 .desc = "TLB range maintenance instructions",
2627 .capability = ARM64_HAS_TLB_RANGE,
2628 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2629 .matches = has_cpuid_feature,
2630 ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, TLB, RANGE)
2631 },
2632 #ifdef CONFIG_ARM64_HW_AFDBM
2633 {
2634 .desc = "Hardware dirty bit management",
2635 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2636 .capability = ARM64_HW_DBM,
2637 .matches = has_hw_dbm,
2638 .cpu_enable = cpu_enable_hw_dbm,
2639 .cpus = &dbm_cpus,
2640 ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM)
2641 },
2642 #endif
2643 {
2644 .desc = "CRC32 instructions",
2645 .capability = ARM64_HAS_CRC32,
2646 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2647 .matches = has_cpuid_feature,
2648 ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, CRC32, IMP)
2649 },
2650 {
2651 .desc = "Speculative Store Bypassing Safe (SSBS)",
2652 .capability = ARM64_SSBS,
2653 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2654 .matches = has_cpuid_feature,
2655 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SSBS, IMP)
2656 },
2657 #ifdef CONFIG_ARM64_CNP
2658 {
2659 .desc = "Common not Private translations",
2660 .capability = ARM64_HAS_CNP,
2661 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2662 .matches = has_useable_cnp,
2663 .cpu_enable = cpu_enable_cnp,
2664 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, CnP, IMP)
2665 },
2666 #endif
2667 {
2668 .desc = "Speculation barrier (SB)",
2669 .capability = ARM64_HAS_SB,
2670 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2671 .matches = has_cpuid_feature,
2672 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, SB, IMP)
2673 },
2674 #ifdef CONFIG_ARM64_PTR_AUTH
2675 {
2676 .desc = "Address authentication (architected QARMA5 algorithm)",
2677 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5,
2678 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2679 .matches = has_address_auth_cpucap,
2680 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth)
2681 },
2682 {
2683 .desc = "Address authentication (architected QARMA3 algorithm)",
2684 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
2685 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2686 .matches = has_address_auth_cpucap,
2687 ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth)
2688 },
2689 {
2690 .desc = "Address authentication (IMP DEF algorithm)",
2691 .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
2692 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2693 .matches = has_address_auth_cpucap,
2694 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth)
2695 },
2696 {
2697 .capability = ARM64_HAS_ADDRESS_AUTH,
2698 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2699 .matches = has_address_auth_metacap,
2700 },
2701 {
2702 .desc = "Generic authentication (architected QARMA5 algorithm)",
2703 .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5,
2704 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2705 .matches = has_cpuid_feature,
2706 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP)
2707 },
2708 {
2709 .desc = "Generic authentication (architected QARMA3 algorithm)",
2710 .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
2711 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2712 .matches = has_cpuid_feature,
2713 ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP)
2714 },
2715 {
2716 .desc = "Generic authentication (IMP DEF algorithm)",
2717 .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
2718 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2719 .matches = has_cpuid_feature,
2720 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP)
2721 },
2722 {
2723 .capability = ARM64_HAS_GENERIC_AUTH,
2724 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2725 .matches = has_generic_auth,
2726 },
2727 #endif /* CONFIG_ARM64_PTR_AUTH */
2728 #ifdef CONFIG_ARM64_PSEUDO_NMI
2729 {
2730 /*
2731 * Depends on having GICv3
2732 */
2733 .desc = "IRQ priority masking",
2734 .capability = ARM64_HAS_GIC_PRIO_MASKING,
2735 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2736 .matches = can_use_gic_priorities,
2737 },
2738 {
2739 /*
2740 * Depends on ARM64_HAS_GIC_PRIO_MASKING
2741 */
2742 .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC,
2743 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2744 .matches = has_gic_prio_relaxed_sync,
2745 },
2746 #endif
2747 #ifdef CONFIG_ARM64_E0PD
2748 {
2749 .desc = "E0PD",
2750 .capability = ARM64_HAS_E0PD,
2751 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2752 .cpu_enable = cpu_enable_e0pd,
2753 .matches = has_cpuid_feature,
2754 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, E0PD, IMP)
2755 },
2756 #endif
2757 {
2758 .desc = "Random Number Generator",
2759 .capability = ARM64_HAS_RNG,
2760 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2761 .matches = has_cpuid_feature,
2762 ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP)
2763 },
2764 #ifdef CONFIG_ARM64_BTI
2765 {
2766 .desc = "Branch Target Identification",
2767 .capability = ARM64_BTI,
2768 #ifdef CONFIG_ARM64_BTI_KERNEL
2769 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2770 #else
2771 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2772 #endif
2773 .matches = has_cpuid_feature,
2774 .cpu_enable = bti_enable,
2775 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, BT, IMP)
2776 },
2777 #endif
2778 #ifdef CONFIG_ARM64_MTE
2779 {
2780 .desc = "Memory Tagging Extension",
2781 .capability = ARM64_MTE,
2782 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2783 .matches = has_cpuid_feature,
2784 .cpu_enable = cpu_enable_mte,
2785 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE2)
2786 },
2787 {
2788 .desc = "Asymmetric MTE Tag Check Fault",
2789 .capability = ARM64_MTE_ASYMM,
2790 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2791 .matches = has_cpuid_feature,
2792 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3)
2793 },
2794 #endif /* CONFIG_ARM64_MTE */
2795 {
2796 .desc = "RCpc load-acquire (LDAPR)",
2797 .capability = ARM64_HAS_LDAPR,
2798 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2799 .matches = has_cpuid_feature,
2800 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP)
2801 },
2802 {
2803 .desc = "Fine Grained Traps",
2804 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2805 .capability = ARM64_HAS_FGT,
2806 .matches = has_cpuid_feature,
2807 ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP)
2808 },
2809 #ifdef CONFIG_ARM64_SME
2810 {
2811 .desc = "Scalable Matrix Extension",
2812 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2813 .capability = ARM64_SME,
2814 .matches = has_cpuid_feature,
2815 .cpu_enable = cpu_enable_sme,
2816 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
2817 },
2818 /* FA64 should be sorted after the base SME capability */
2819 {
2820 .desc = "FA64",
2821 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2822 .capability = ARM64_SME_FA64,
2823 .matches = has_cpuid_feature,
2824 .cpu_enable = cpu_enable_fa64,
2825 ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
2826 },
2827 {
2828 .desc = "SME2",
2829 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2830 .capability = ARM64_SME2,
2831 .matches = has_cpuid_feature,
2832 .cpu_enable = cpu_enable_sme2,
2833 ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
2834 },
2835 #endif /* CONFIG_ARM64_SME */
2836 {
2837 .desc = "WFx with timeout",
2838 .capability = ARM64_HAS_WFXT,
2839 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2840 .matches = has_cpuid_feature,
2841 ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, WFxT, IMP)
2842 },
2843 {
2844 .desc = "Trap EL0 IMPLEMENTATION DEFINED functionality",
2845 .capability = ARM64_HAS_TIDCP1,
2846 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2847 .matches = has_cpuid_feature,
2848 .cpu_enable = cpu_trap_el0_impdef,
2849 ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TIDCP1, IMP)
2850 },
2851 {
2852 .desc = "Data independent timing control (DIT)",
2853 .capability = ARM64_HAS_DIT,
2854 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2855 .matches = has_cpuid_feature,
2856 .cpu_enable = cpu_enable_dit,
2857 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
2858 },
2859 {
2860 .desc = "Memory Copy and Memory Set instructions",
2861 .capability = ARM64_HAS_MOPS,
2862 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2863 .matches = has_cpuid_feature,
2864 .cpu_enable = cpu_enable_mops,
2865 ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP)
2866 },
2867 {
2868 .capability = ARM64_HAS_TCR2,
2869 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2870 .matches = has_cpuid_feature,
2871 ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, TCRX, IMP)
2872 },
2873 {
2874 .desc = "Stage-1 Permission Indirection Extension (S1PIE)",
2875 .capability = ARM64_HAS_S1PIE,
2876 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2877 .matches = has_cpuid_feature,
2878 ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1PIE, IMP)
2879 },
2880 {
2881 .desc = "VHE for hypervisor only",
2882 .capability = ARM64_KVM_HVHE,
2883 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2884 .matches = hvhe_possible,
2885 },
2886 {
2887 .desc = "Enhanced Virtualization Traps",
2888 .capability = ARM64_HAS_EVT,
2889 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2890 .matches = has_cpuid_feature,
2891 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
2892 },
2893 {
2894 .desc = "52-bit Virtual Addressing for KVM (LPA2)",
2895 .capability = ARM64_HAS_LPA2,
2896 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2897 .matches = has_lpa2,
2898 },
2899 {
2900 .desc = "FPMR",
2901 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2902 .capability = ARM64_HAS_FPMR,
2903 .matches = has_cpuid_feature,
2904 .cpu_enable = cpu_enable_fpmr,
2905 ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP)
2906 },
2907 #ifdef CONFIG_ARM64_VA_BITS_52
2908 {
2909 .capability = ARM64_HAS_VA52,
2910 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2911 .matches = has_cpuid_feature,
2912 #ifdef CONFIG_ARM64_64K_PAGES
2913 .desc = "52-bit Virtual Addressing (LVA)",
2914 ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52)
2915 #else
2916 .desc = "52-bit Virtual Addressing (LPA2)",
2917 #ifdef CONFIG_ARM64_4K_PAGES
2918 ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT)
2919 #else
2920 ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN16, 52_BIT)
2921 #endif
2922 #endif
2923 },
2924 #endif
2925 {
2926 .desc = "Memory Partitioning And Monitoring",
2927 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2928 .capability = ARM64_MPAM,
2929 .matches = test_has_mpam,
2930 .cpu_enable = cpu_enable_mpam,
2931 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1)
2932 },
2933 {
2934 .desc = "Memory Partitioning And Monitoring Virtualisation",
2935 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2936 .capability = ARM64_MPAM_HCR,
2937 .matches = test_has_mpam_hcr,
2938 },
2939 {
2940 .desc = "NV1",
2941 .capability = ARM64_HAS_HCR_NV1,
2942 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2943 .matches = has_nv1,
2944 ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
2945 },
2946 #ifdef CONFIG_ARM64_POE
2947 {
2948 .desc = "Stage-1 Permission Overlay Extension (S1POE)",
2949 .capability = ARM64_HAS_S1POE,
2950 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2951 .matches = has_cpuid_feature,
2952 .cpu_enable = cpu_enable_poe,
2953 ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1POE, IMP)
2954 },
2955 #endif
2956 {},
2957 };
2958
2959 #define HWCAP_CPUID_MATCH(reg, field, min_value) \
2960 .matches = has_user_cpuid_feature, \
2961 ARM64_CPUID_FIELDS(reg, field, min_value)
2962
2963 #define __HWCAP_CAP(name, cap_type, cap) \
2964 .desc = name, \
2965 .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
2966 .hwcap_type = cap_type, \
2967 .hwcap = cap, \
2968
2969 #define HWCAP_CAP(reg, field, min_value, cap_type, cap) \
2970 { \
2971 __HWCAP_CAP(#cap, cap_type, cap) \
2972 HWCAP_CPUID_MATCH(reg, field, min_value) \
2973 }
2974
2975 #define HWCAP_MULTI_CAP(list, cap_type, cap) \
2976 { \
2977 __HWCAP_CAP(#cap, cap_type, cap) \
2978 .matches = cpucap_multi_entry_cap_matches, \
2979 .match_list = list, \
2980 }
2981
2982 #define HWCAP_CAP_MATCH(match, cap_type, cap) \
2983 { \
2984 __HWCAP_CAP(#cap, cap_type, cap) \
2985 .matches = match, \
2986 }
2987
2988 #define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap) \
2989 { \
2990 __HWCAP_CAP(#cap, cap_type, cap) \
2991 HWCAP_CPUID_MATCH(reg, field, min_value) \
2992 .matches = match, \
2993 }
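
/*
 * Purely for illustration (this comment is not expanded by anything): an
 * entry such as
 *
 *	HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1)
 *
 * produces an arm64_cpu_capabilities initialiser roughly of the form
 *
 *	{
 *		.desc		= "KERNEL_HWCAP_SHA1",
 *		.type		= ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.hwcap_type	= CAP_HWCAP,
 *		.hwcap		= KERNEL_HWCAP_SHA1,
 *		.matches	= has_user_cpuid_feature,
 *		<ID register field description from ARM64_CPUID_FIELDS()>
 *	},
 *
 * i.e. the hwcap is advertised once the sanitised ID_AA64ISAR0_EL1.SHA1
 * field is at least "IMP".
 */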
2994
2995 #ifdef CONFIG_ARM64_PTR_AUTH
2996 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
2997 {
2998 HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth)
2999 },
3000 {
3001 HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth)
3002 },
3003 {
3004 HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth)
3005 },
3006 {},
3007 };
3008
3009 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
3010 {
3011 HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP)
3012 },
3013 {
3014 HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP)
3015 },
3016 {
3017 HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP)
3018 },
3019 {},
3020 };
3021 #endif
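
/*
 * Note: combined with HWCAP_MULTI_CAP() below, these lists mean that PACA and
 * PACG are advertised if *any one* of the architected QARMA5, architected
 * QARMA3 or IMPLEMENTATION DEFINED algorithm fields is implemented;
 * cpucap_multi_entry_cap_matches() succeeds as soon as a single entry in the
 * match_list matches.
 */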
3022
3023 #ifdef CONFIG_ARM64_SVE
3024 static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
3025 {
3026 return system_supports_sve() && has_user_cpuid_feature(cap, scope);
3027 }
3028 #endif
3029
3030 #ifdef CONFIG_ARM64_SME
3031 static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
3032 {
3033 return system_supports_sme() && has_user_cpuid_feature(cap, scope);
3034 }
3035 #endif
3036
3037 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
3038 HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
3039 HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
3040 HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1),
3041 HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2),
3042 HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512),
3043 HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32),
3044 HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
3045 HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, FEAT_LSE128, CAP_HWCAP, KERNEL_HWCAP_LSE128),
3046 HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
3047 HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3),
3048 HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3),
3049 HWCAP_CAP(ID_AA64ISAR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SM4),
3050 HWCAP_CAP(ID_AA64ISAR0_EL1, DP, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
3051 HWCAP_CAP(ID_AA64ISAR0_EL1, FHM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
3052 HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
3053 HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
3054 HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG),
3055 HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP),
3056 HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP),
3057 HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
3058 HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
3059 HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT),
3060 HWCAP_CAP(ID_AA64PFR2_EL1, FPMR, IMP, CAP_HWCAP, KERNEL_HWCAP_FPMR),
3061 HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
3062 HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
3063 HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
3064 HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA),
3065 HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
3066 HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
3067 HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC3, CAP_HWCAP, KERNEL_HWCAP_LRCPC3),
3068 HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT),
3069 HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB),
3070 HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16),
3071 HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
3072 HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
3073 HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
3074 HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT),
3075 HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX),
3076 HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
3077 #ifdef CONFIG_ARM64_SVE
3078 HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
3079 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
3080 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
3081 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
3082 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
3083 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
3084 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
3085 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
3086 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
3087 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
3088 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
3089 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
3090 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
3091 HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
3092 #endif
3093 HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
3094 #ifdef CONFIG_ARM64_BTI
3095 HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
3096 #endif
3097 #ifdef CONFIG_ARM64_PTR_AUTH
3098 HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
3099 HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
3100 #endif
3101 #ifdef CONFIG_ARM64_MTE
3102 HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
3103 HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
3104 #endif /* CONFIG_ARM64_MTE */
3105 HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV),
3106 HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP),
3107 HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC),
3108 HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
3109 HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
3110 HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
3111 HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
3112 HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
3113 #ifdef CONFIG_ARM64_SME
3114 HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
3115 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
3116 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
3117 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
3118 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
3119 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
3120 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
3121 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
3122 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
3123 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
3124 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
3125 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
3126 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
3127 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
3128 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
3129 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
3130 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
3131 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
3132 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
3133 HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
3134 #endif /* CONFIG_ARM64_SME */
3135 HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
3136 HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
3137 HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4),
3138 HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2),
3139 HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3),
3140 HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2),
3141 #ifdef CONFIG_ARM64_POE
3142 HWCAP_CAP(ID_AA64MMFR3_EL1, S1POE, IMP, CAP_HWCAP, KERNEL_HWCAP_POE),
3143 #endif
3144 {},
3145 };
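
/*
 * Each KERNEL_HWCAP_* bit set from the table above is also reported, by name,
 * in the "Features" line of /proc/cpuinfo (see the hwcap_str[] table in
 * cpuinfo.c).
 */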
3146
3147 #ifdef CONFIG_COMPAT
3148 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
3149 {
3150 /*
3151 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
3152 * in line with the arm32 check in vfp_init(). We keep the check
3153 * future-proof by only requiring that the field values are non-zero.
3154 */
3155 u32 mvfr1;
3156
3157 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
3158 if (scope == SCOPE_SYSTEM)
3159 mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
3160 else
3161 mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
3162
3163 return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDSP_SHIFT) &&
3164 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDInt_SHIFT) &&
3165 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDLS_SHIFT);
3166 }
3167 #endif
3168
3169 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
3170 #ifdef CONFIG_COMPAT
3171 HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
3172 HWCAP_CAP(MVFR1_EL1, SIMDFMAC, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
3173 /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggyback on this for the presence of VFP support */
3174 HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
3175 HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
3176 HWCAP_CAP(MVFR1_EL1, FPHP, FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP),
3177 HWCAP_CAP(MVFR1_EL1, SIMDHP, SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP),
3178 HWCAP_CAP(ID_ISAR5_EL1, AES, VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
3179 HWCAP_CAP(ID_ISAR5_EL1, AES, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
3180 HWCAP_CAP(ID_ISAR5_EL1, SHA1, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
3181 HWCAP_CAP(ID_ISAR5_EL1, SHA2, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
3182 HWCAP_CAP(ID_ISAR5_EL1, CRC32, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
3183 HWCAP_CAP(ID_ISAR6_EL1, DP, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
3184 HWCAP_CAP(ID_ISAR6_EL1, FHM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
3185 HWCAP_CAP(ID_ISAR6_EL1, SB, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB),
3186 HWCAP_CAP(ID_ISAR6_EL1, BF16, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
3187 HWCAP_CAP(ID_ISAR6_EL1, I8MM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
3188 HWCAP_CAP(ID_PFR2_EL1, SSBS, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS),
3189 #endif
3190 {},
3191 };
3192
3193 static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
3194 {
3195 switch (cap->hwcap_type) {
3196 case CAP_HWCAP:
3197 cpu_set_feature(cap->hwcap);
3198 break;
3199 #ifdef CONFIG_COMPAT
3200 case CAP_COMPAT_HWCAP:
3201 compat_elf_hwcap |= (u32)cap->hwcap;
3202 break;
3203 case CAP_COMPAT_HWCAP2:
3204 compat_elf_hwcap2 |= (u32)cap->hwcap;
3205 break;
3206 #endif
3207 default:
3208 WARN_ON(1);
3209 break;
3210 }
3211 }
3212
3213 /* Check if we have a particular HWCAP enabled */
3214 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
3215 {
3216 bool rc;
3217
3218 switch (cap->hwcap_type) {
3219 case CAP_HWCAP:
3220 rc = cpu_have_feature(cap->hwcap);
3221 break;
3222 #ifdef CONFIG_COMPAT
3223 case CAP_COMPAT_HWCAP:
3224 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
3225 break;
3226 case CAP_COMPAT_HWCAP2:
3227 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
3228 break;
3229 #endif
3230 default:
3231 WARN_ON(1);
3232 rc = false;
3233 }
3234
3235 return rc;
3236 }
3237
3238 static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
3239 {
3240 /* We support emulation of accesses to CPU ID feature registers */
3241 cpu_set_named_feature(CPUID);
3242 for (; hwcaps->matches; hwcaps++)
3243 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
3244 cap_set_elf_hwcap(hwcaps);
3245 }
3246
3247 static void update_cpu_capabilities(u16 scope_mask)
3248 {
3249 int i;
3250 const struct arm64_cpu_capabilities *caps;
3251
3252 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
3253 for (i = 0; i < ARM64_NCAPS; i++) {
3254 caps = cpucap_ptrs[i];
3255 if (!caps || !(caps->type & scope_mask) ||
3256 cpus_have_cap(caps->capability) ||
3257 !caps->matches(caps, cpucap_default_scope(caps)))
3258 continue;
3259
3260 if (caps->desc && !caps->cpus)
3261 pr_info("detected: %s\n", caps->desc);
3262
3263 __set_bit(caps->capability, system_cpucaps);
3264
3265 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
3266 set_bit(caps->capability, boot_cpucaps);
3267 }
3268 }
3269
3270 /*
3271 * Enable all the available capabilities on this CPU. The capabilities
3272 * with BOOT_CPU scope are handled separately and hence skipped here.
3273 */
3274 static int cpu_enable_non_boot_scope_capabilities(void *__unused)
3275 {
3276 int i;
3277 u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
3278
3279 for_each_available_cap(i) {
3280 const struct arm64_cpu_capabilities *cap = cpucap_ptrs[i];
3281
3282 if (WARN_ON(!cap))
3283 continue;
3284
3285 if (!(cap->type & non_boot_scope))
3286 continue;
3287
3288 if (cap->cpu_enable)
3289 cap->cpu_enable(cap);
3290 }
3291 return 0;
3292 }
3293
3294 /*
3295 * Run through the enabled capabilities and enable() them on all active
3296 * CPUs.
3297 */
3298 static void __init enable_cpu_capabilities(u16 scope_mask)
3299 {
3300 int i;
3301 const struct arm64_cpu_capabilities *caps;
3302 bool boot_scope;
3303
3304 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
3305 boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
3306
3307 for (i = 0; i < ARM64_NCAPS; i++) {
3308 caps = cpucap_ptrs[i];
3309 if (!caps || !(caps->type & scope_mask) ||
3310 !cpus_have_cap(caps->capability))
3311 continue;
3312
3313 if (boot_scope && caps->cpu_enable)
3314 /*
3315 * Capabilities with SCOPE_BOOT_CPU scope are finalised
3316 * before any secondary CPU boots. Thus, each secondary
3317 * will enable the capability as appropriate via
3318 * check_local_cpu_capabilities(). The only exception is
3319 * the boot CPU, for which the capability must be
3320 * enabled here. This approach avoids costly
3321 * stop_machine() calls for this case.
3322 */
3323 caps->cpu_enable(caps);
3324 }
3325
3326 /*
3327 * For all non-boot scope capabilities, use stop_machine()
3328 * as it schedules the work allowing us to modify PSTATE,
3329 * instead of on_each_cpu() which uses an IPI, giving us a
3330 * PSTATE that disappears when we return.
3331 */
3332 if (!boot_scope)
3333 stop_machine(cpu_enable_non_boot_scope_capabilities,
3334 NULL, cpu_online_mask);
3335 }
3336
3337 /*
3338 * Run through the list of capabilities to check for conflicts.
3339 * If the system has already detected a capability, take necessary
3340 * action on this CPU.
3341 */
3342 static void verify_local_cpu_caps(u16 scope_mask)
3343 {
3344 int i;
3345 bool cpu_has_cap, system_has_cap;
3346 const struct arm64_cpu_capabilities *caps;
3347
3348 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
3349
3350 for (i = 0; i < ARM64_NCAPS; i++) {
3351 caps = cpucap_ptrs[i];
3352 if (!caps || !(caps->type & scope_mask))
3353 continue;
3354
3355 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
3356 system_has_cap = cpus_have_cap(caps->capability);
3357
3358 if (system_has_cap) {
3359 /*
3360 * Check if the new CPU misses an advertised feature,
3361 * which is not safe to miss.
3362 */
3363 if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
3364 break;
3365 /*
3366 * We have to issue cpu_enable() irrespective of
3367 * whether the CPU has it or not, as it is enabled
3368 * system wide. It is up to the callback to take
3369 * appropriate action on this CPU.
3370 */
3371 if (caps->cpu_enable)
3372 caps->cpu_enable(caps);
3373 } else {
3374 /*
3375 * Check if the CPU has this capability if it isn't
3376 * safe to have when the system doesn't.
3377 */
3378 if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
3379 break;
3380 }
3381 }
3382
3383 if (i < ARM64_NCAPS) {
3384 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
3385 smp_processor_id(), caps->capability,
3386 caps->desc, system_has_cap, cpu_has_cap);
3387
3388 if (cpucap_panic_on_conflict(caps))
3389 cpu_panic_kernel();
3390 else
3391 cpu_die_early();
3392 }
3393 }
3394
3395 /*
3396 * Check for CPU features that are used in early boot
3397 * based on the Boot CPU value.
3398 */
3399 static void check_early_cpu_features(void)
3400 {
3401 verify_cpu_asid_bits();
3402
3403 verify_local_cpu_caps(SCOPE_BOOT_CPU);
3404 }
3405
3406 static void
3407 __verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
3408 {
3409
3410 for (; caps->matches; caps++)
3411 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
3412 pr_crit("CPU%d: missing HWCAP: %s\n",
3413 smp_processor_id(), caps->desc);
3414 cpu_die_early();
3415 }
3416 }
3417
3418 static void verify_local_elf_hwcaps(void)
3419 {
3420 __verify_local_elf_hwcaps(arm64_elf_hwcaps);
3421
3422 if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
3423 __verify_local_elf_hwcaps(compat_elf_hwcaps);
3424 }
3425
3426 static void verify_sve_features(void)
3427 {
3428 unsigned long cpacr = cpacr_save_enable_kernel_sve();
3429
3430 if (vec_verify_vq_map(ARM64_VEC_SVE)) {
3431 pr_crit("CPU%d: SVE: vector length support mismatch\n",
3432 smp_processor_id());
3433 cpu_die_early();
3434 }
3435
3436 cpacr_restore(cpacr);
3437 }
3438
3439 static void verify_sme_features(void)
3440 {
3441 unsigned long cpacr = cpacr_save_enable_kernel_sme();
3442
3443 if (vec_verify_vq_map(ARM64_VEC_SME)) {
3444 pr_crit("CPU%d: SME: vector length support mismatch\n",
3445 smp_processor_id());
3446 cpu_die_early();
3447 }
3448
3449 cpacr_restore(cpacr);
3450 }
3451
3452 static void verify_hyp_capabilities(void)
3453 {
3454 u64 safe_mmfr1, mmfr0, mmfr1;
3455 int parange, ipa_max;
3456 unsigned int safe_vmid_bits, vmid_bits;
3457
3458 if (!IS_ENABLED(CONFIG_KVM))
3459 return;
3460
3461 safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
3462 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
3463 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
3464
3465 /* Verify VMID bits */
3466 safe_vmid_bits = get_vmid_bits(safe_mmfr1);
3467 vmid_bits = get_vmid_bits(mmfr1);
3468 if (vmid_bits < safe_vmid_bits) {
3469 pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
3470 cpu_die_early();
3471 }
3472
3473 /* Verify IPA range */
3474 parange = cpuid_feature_extract_unsigned_field(mmfr0,
3475 ID_AA64MMFR0_EL1_PARANGE_SHIFT);
3476 ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
3477 if (ipa_max < get_kvm_ipa_limit()) {
3478 pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
3479 cpu_die_early();
3480 }
3481 }
3482
3483 static void verify_mpam_capabilities(void)
3484 {
3485 u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1);
3486 u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
3487 u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max;
3488
3489 if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) !=
3490 FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) {
3491 pr_crit("CPU%d: MPAM version mismatch\n", smp_processor_id());
3492 cpu_die_early();
3493 }
3494
3495 cpu_idr = read_cpuid(MPAMIDR_EL1);
3496 sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
3497 if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) !=
3498 FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) {
3499 pr_crit("CPU%d: Missing MPAM HCR\n", smp_processor_id());
3500 cpu_die_early();
3501 }
3502
3503 cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr);
3504 cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr);
3505 sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr);
3506 sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr);
3507 if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) {
3508 pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n", smp_processor_id());
3509 cpu_die_early();
3510 }
3511 }
3512
3513 /*
3514 * Run through the enabled system capabilities and enable() them on this CPU.
3515 * The capabilities were decided based on the CPUs available at boot time.
3516 * Any new CPU should match the system-wide status of the capability. If the
3517 * new CPU doesn't have a capability which the system now has enabled, we
3518 * cannot do anything to fix it up; this could cause unexpected failures, so
3519 * we park the CPU.
3520 */
3521 static void verify_local_cpu_capabilities(void)
3522 {
3523 /*
3524 * The capabilities with SCOPE_BOOT_CPU are checked from
3525 * check_early_cpu_features(), as they need to be verified
3526 * on all secondary CPUs.
3527 */
3528 verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
3529 verify_local_elf_hwcaps();
3530
3531 if (system_supports_sve())
3532 verify_sve_features();
3533
3534 if (system_supports_sme())
3535 verify_sme_features();
3536
3537 if (is_hyp_mode_available())
3538 verify_hyp_capabilities();
3539
3540 if (system_supports_mpam())
3541 verify_mpam_capabilities();
3542 }
3543
3544 void check_local_cpu_capabilities(void)
3545 {
3546 /*
3547 * All secondary CPUs should conform to the early CPU features
3548 * in use by the kernel based on boot CPU.
3549 */
3550 check_early_cpu_features();
3551
3552 /*
3553 * If we haven't finalised the system capabilities, this CPU gets
3554 * a chance to update the errata workarounds and local features.
3555 * Otherwise, this CPU should verify that it has all the system
3556 * advertised capabilities.
3557 */
3558 if (!system_capabilities_finalized())
3559 update_cpu_capabilities(SCOPE_LOCAL_CPU);
3560 else
3561 verify_local_cpu_capabilities();
3562 }
3563
3564 bool this_cpu_has_cap(unsigned int n)
3565 {
3566 if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
3567 const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n];
3568
3569 if (cap)
3570 return cap->matches(cap, SCOPE_LOCAL_CPU);
3571 }
3572
3573 return false;
3574 }
3575 EXPORT_SYMBOL_GPL(this_cpu_has_cap);
3576
3577 /*
3578 * This helper function is used in a narrow window when:
3579 * - the system-wide safe registers have been set up for all the SMP CPUs, and
3580 * - the SYSTEM_FEATURE system_cpucaps may not yet have been set.
3581 */
3582 static bool __maybe_unused __system_matches_cap(unsigned int n)
3583 {
3584 if (n < ARM64_NCAPS) {
3585 const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n];
3586
3587 if (cap)
3588 return cap->matches(cap, SCOPE_SYSTEM);
3589 }
3590 return false;
3591 }
3592
3593 void cpu_set_feature(unsigned int num)
3594 {
3595 set_bit(num, elf_hwcap);
3596 }
3597
3598 bool cpu_have_feature(unsigned int num)
3599 {
3600 return test_bit(num, elf_hwcap);
3601 }
3602 EXPORT_SYMBOL_GPL(cpu_have_feature);
3603
3604 unsigned long cpu_get_elf_hwcap(void)
3605 {
3606 /*
3607 * We currently only populate the first 32 bits of AT_HWCAP. Please
3608 * note that for userspace compatibility we guarantee that bits 62
3609 * and 63 will always be returned as 0.
3610 */
3611 return elf_hwcap[0];
3612 }
3613
3614 unsigned long cpu_get_elf_hwcap2(void)
3615 {
3616 return elf_hwcap[1];
3617 }
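
/*
 * Informational example (placeholder userspace code, not part of this file):
 * these two values back the ELF_HWCAP/ELF_HWCAP2 auxv entries, so a process
 * typically tests a feature with something like
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_ATOMICS)
 *		use_lse_atomics();
 *
 * where use_lse_atomics() stands in for the caller's own code, rather than
 * reading the ID registers directly (which traps and is emulated, see
 * do_emulate_mrs() below).
 */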
3618
3619 static void __init setup_boot_cpu_capabilities(void)
3620 {
3621 /*
3622 * The boot CPU's feature register values have been recorded. Detect
3623 * boot cpucaps and local cpucaps for the boot CPU, then enable and
3624 * patch alternatives for the available boot cpucaps.
3625 */
3626 update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
3627 enable_cpu_capabilities(SCOPE_BOOT_CPU);
3628 apply_boot_alternatives();
3629 }
3630
3631 void __init setup_boot_cpu_features(void)
3632 {
3633 /*
3634 * Initialize the indirect array of CPU capabilities pointers before we
3635 * handle the boot CPU.
3636 */
3637 init_cpucap_indirect_list();
3638
3639 /*
3640 * Detect broken pseudo-NMI. Must be called _before_ the call to
3641 * setup_boot_cpu_capabilities() since it interacts with
3642 * can_use_gic_priorities().
3643 */
3644 detect_system_supports_pseudo_nmi();
3645
3646 setup_boot_cpu_capabilities();
3647 }
3648
3649 static void __init setup_system_capabilities(void)
3650 {
3651 /*
3652 * The system-wide safe feature register values have been finalized.
3653 * Detect, enable, and patch alternatives for the available system
3654 * cpucaps.
3655 */
3656 update_cpu_capabilities(SCOPE_SYSTEM);
3657 enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
3658 apply_alternatives_all();
3659
3660 /*
3661 * Log any cpucaps with a cpumask as these aren't logged by
3662 * update_cpu_capabilities().
3663 */
3664 for (int i = 0; i < ARM64_NCAPS; i++) {
3665 const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
3666
3667 if (caps && caps->cpus && caps->desc &&
3668 cpumask_any(caps->cpus) < nr_cpu_ids)
3669 pr_info("detected: %s on CPU%*pbl\n",
3670 caps->desc, cpumask_pr_args(caps->cpus));
3671 }
3672
3673 /*
3674 * TTBR0 PAN doesn't have its own cpucap, so log it manually.
3675 */
3676 if (system_uses_ttbr0_pan())
3677 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
3678 }
3679
3680 void __init setup_system_features(void)
3681 {
3682 setup_system_capabilities();
3683
3684 kpti_install_ng_mappings();
3685
3686 sve_setup();
3687 sme_setup();
3688
3689 /*
3690 * Check for sane CTR_EL0.CWG value.
3691 */
3692 if (!cache_type_cwg())
3693 pr_warn("No Cache Writeback Granule information, assuming %d\n",
3694 ARCH_DMA_MINALIGN);
3695 }
3696
3697 void __init setup_user_features(void)
3698 {
3699 user_feature_fixup();
3700
3701 setup_elf_hwcaps(arm64_elf_hwcaps);
3702
3703 if (system_supports_32bit_el0()) {
3704 setup_elf_hwcaps(compat_elf_hwcaps);
3705 elf_hwcap_fixup();
3706 }
3707
3708 minsigstksz_setup();
3709 }
3710
3711 static int enable_mismatched_32bit_el0(unsigned int cpu)
3712 {
3713 /*
3714 * The first 32-bit-capable CPU we detected, which therefore can no longer
3715 * be offlined by userspace. -1 indicates we haven't yet onlined
3716 * a 32-bit-capable CPU.
3717 */
3718 static int lucky_winner = -1;
3719
3720 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
3721 bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
3722
3723 if (cpu_32bit) {
3724 cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
3725 static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
3726 }
3727
3728 if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
3729 return 0;
3730
3731 if (lucky_winner >= 0)
3732 return 0;
3733
3734 /*
3735 * We've detected a mismatch. We need to keep one of our CPUs with
3736 * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
3737 * every CPU in the system for a 32-bit task.
3738 */
3739 lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
3740 cpu_active_mask);
3741 get_cpu_device(lucky_winner)->offline_disabled = true;
3742 setup_elf_hwcaps(compat_elf_hwcaps);
3743 elf_hwcap_fixup();
3744 pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
3745 cpu, lucky_winner);
3746 return 0;
3747 }
3748
3749 static int __init init_32bit_el0_mask(void)
3750 {
3751 if (!allow_mismatched_32bit_el0)
3752 return 0;
3753
3754 if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
3755 return -ENOMEM;
3756
3757 return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
3758 "arm64/mismatched_32bit_el0:online",
3759 enable_mismatched_32bit_el0, NULL);
3760 }
3761 subsys_initcall_sync(init_32bit_el0_mask);
3762
3763 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
3764 {
3765 cpu_enable_swapper_cnp();
3766 }
3767
3768 /*
3769 * We emulate only the following system register space.
3770 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7]
3771 * See Table C5-6 "System instruction encodings for System register accesses",
3772 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
3773 */
3774 static inline bool __attribute_const__ is_emulated(u32 id)
3775 {
3776 return (sys_reg_Op0(id) == 0x3 &&
3777 sys_reg_CRn(id) == 0x0 &&
3778 sys_reg_Op1(id) == 0x0 &&
3779 (sys_reg_CRm(id) == 0 ||
3780 ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7))));
3781 }
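
/*
 * By way of example: MIDR_EL1 (Op0 3, Op1 0, CRn 0, CRm 0, Op2 0) is accepted
 * here and handled by emulate_id_reg(); ID_AA64ISAR0_EL1 (Op0 3, Op1 0, CRn 0,
 * CRm 6, Op2 0) is accepted and served from the sanitised feature register
 * state; a register such as SCTLR_EL1 (CRn 1) is rejected.
 */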
3782
3783 /*
3784 * With CRm == 0, reg should be one of:
3785 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
3786 */
3787 static inline int emulate_id_reg(u32 id, u64 *valp)
3788 {
3789 switch (id) {
3790 case SYS_MIDR_EL1:
3791 *valp = read_cpuid_id();
3792 break;
3793 case SYS_MPIDR_EL1:
3794 *valp = SYS_MPIDR_SAFE_VAL;
3795 break;
3796 case SYS_REVIDR_EL1:
3797 /* IMPLEMENTATION DEFINED values are emulated with 0 */
3798 *valp = 0;
3799 break;
3800 default:
3801 return -EINVAL;
3802 }
3803
3804 return 0;
3805 }
3806
3807 static int emulate_sys_reg(u32 id, u64 *valp)
3808 {
3809 struct arm64_ftr_reg *regp;
3810
3811 if (!is_emulated(id))
3812 return -EINVAL;
3813
3814 if (sys_reg_CRm(id) == 0)
3815 return emulate_id_reg(id, valp);
3816
3817 regp = get_arm64_ftr_reg_nowarn(id);
3818 if (regp)
3819 *valp = arm64_ftr_reg_user_value(regp);
3820 else
3821 /*
3822 * The untracked registers are either IMPLEMENTATION DEFINED
3823 * (e.g., ID_AFR0_EL1) or reserved RAZ.
3824 */
3825 *valp = 0;
3826 return 0;
3827 }
3828
3829 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
3830 {
3831 int rc;
3832 u64 val;
3833
3834 rc = emulate_sys_reg(sys_reg, &val);
3835 if (!rc) {
3836 pt_regs_write_reg(regs, rt, val);
3837 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
3838 }
3839 return rc;
3840 }
3841
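/*
 * Broadly speaking: when a userspace "mrs x<n>, <ID register>" traps, the
 * exception handling path ends up here, decodes the instruction and writes
 * back the sanitised, user-visible view of the register instead of delivering
 * a fault to the task.
 */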
3842 bool try_emulate_mrs(struct pt_regs *regs, u32 insn)
3843 {
3844 u32 sys_reg, rt;
3845
3846 if (compat_user_mode(regs) || !aarch64_insn_is_mrs(insn))
3847 return false;
3848
3849 /*
3850 * sys_reg values are defined as used in the mrs/msr instructions;
3851 * shift the immediate value to get the encoding.
3852 */
3853 sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
3854 rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
3855 return do_emulate_mrs(regs, sys_reg, rt) == 0;
3856 }
3857
3858 enum mitigation_state arm64_get_meltdown_state(void)
3859 {
3860 if (__meltdown_safe)
3861 return SPECTRE_UNAFFECTED;
3862
3863 if (arm64_kernel_unmapped_at_el0())
3864 return SPECTRE_MITIGATED;
3865
3866 return SPECTRE_VULNERABLE;
3867 }
3868
3869 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
3870 char *buf)
3871 {
3872 switch (arm64_get_meltdown_state()) {
3873 case SPECTRE_UNAFFECTED:
3874 return sprintf(buf, "Not affected\n");
3875
3876 case SPECTRE_MITIGATED:
3877 return sprintf(buf, "Mitigation: PTI\n");
3878
3879 default:
3880 return sprintf(buf, "Vulnerable\n");
3881 }
3882 }
3883