// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * A note for the weary kernel hacker: the code here is confusing and hard to
 * follow! That's partly because it's solving a nasty problem, but also because
 * there's a little bit of over-abstraction that tends to obscure what's going
 * on here.
 *
 * The basic problem is that hardware folks have started gluing together CPUs
 * with distinct architectural features; in some cases even creating SoCs where
 * user-visible instructions are available only on a subset of the available
 * cores. We try to address this by snapshotting the feature registers of the
 * boot CPU and comparing these with the feature registers of each secondary
 * CPU when bringing them up. If there is a mismatch, then we update the
 * snapshot state to indicate the lowest-common denominator of the feature,
 * known as the "safe" value. This snapshot state can be queried to view the
 * "sanitised" value of a feature register. While a feature mismatch may
 * result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
 * may prevent a CPU from being onlined at all.
 *
 * Some implementation details worth remembering:
 *
 * - Mismatched features are *always* sanitised to a "safe" value, which
 *   means that the mapping between a feature value and its safe value is
 *   irreversible.
 *
 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
 *   warning when onlining an offending CPU and the kernel will be tainted
 *   with TAINT_CPU_OUT_OF_SPEC.
 *
 * - Features marked as FTR_VISIBLE have their sanitised value visible to
 *   userspace.
 *
 * - A "feature" is typically a 4-bit register field. A "capability" is the
 *   high-level description derived from the sanitised field value.
 *
 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
 *   scheme for fields in ID registers") to understand when feature fields
 *   may be signed or unsigned.
 *
 * - KVM exposes its own view of the feature registers to guest operating
 *   systems regardless of FTR_VISIBLE.
 *
 * - If the arm64_ftr_bits[] for a register has a missing field, then this
 *   field is treated as STRICT RES0 for the system-wide sanitised value,
 *   and must strictly match across CPUs.
 */

#define pr_fmt(fmt) "CPU features: " fmt
#include <linux/cpu.h>
/* ... */
#include <asm/cpu.h>
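
/*
 * Editor's sketch (not part of the kernel source): the "lowest common
 * denominator" sanitisation described in the header comment, reduced to one
 * unsigned 4-bit ID register field. The kernel does this for real via
 * arm64_ftr_safe_value()/update_cpu_ftr_reg(); ftr_sanitise_field() is a
 * hypothetical name used here for illustration only.
 */
static inline unsigned int ftr_sanitise_field(unsigned int boot_val,
					      unsigned int secondary_val)
{
	/* For an FTR_LOWER_SAFE field, the lower value is the safe one. */
	return boot_val < secondary_val ? boot_val : secondary_val;
}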
/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier. This is also used to decide if we could use
 * the fast path for checking constant CPU caps.
 */
/* in dump_cpu_features(): the file-wide pr_fmt adds the "CPU features: " prefix */
/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
	/*
	 * Page size not being supported at Stage-2 is not fatal. You
	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
	 * your favourite nesting hypervisor.
	 *
	 * There is a small corner case where the hypervisor explicitly
	 * advertises a given granule size at Stage-2 (value 2) on some
	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
	 * vCPUs. Although this is not forbidden by the architecture, it
	 * indicates that the hypervisor is being silly (or buggy).
	 *
	 * We make no effort to cope with this and pretend that if these
	 * fields are inconsistent across vCPUs, then it isn't worth
	 * trying to bring KVM up.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
	/*
	 * We already refuse to boot CPUs that don't support our configured
	 * page size, so we can only detect mismatches for a page size other
	 * than the one we're currently using. Unfortunately, SoCs like this
	 * exist in the wild so, even though we don't like it, we'll have to go
	 * along with it and treat them as non-strict.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
	ARM64_FTR_END,
};
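
/*
 * Editor's sketch (not part of the kernel source): why userspace JITs care
 * about CTR_EL0.{D,I}minLine. Each field holds log2 of the number of 4-byte
 * words in the smallest cache line, so the line size in bytes is 4 << field.
 * ctr_dminline_bytes() is a hypothetical helper name.
 */
static inline unsigned long ctr_dminline_bytes(unsigned long ctr)
{
	return 4UL << ((ctr >> CTR_DMINLINE_SHIFT) & 0xf);
}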
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar6[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	/* [31:28] TraceFilt */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_dfr1[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
	ARM64_FTR_END,
};
/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};
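
/*
 * Editor's sketch (not part of the kernel source): extracting one of the
 * 4-bit fields described by the tables above. The kernel uses
 * cpuid_feature_extract_unsigned_field() from <asm/cpufeature.h>; this
 * stand-alone version spells out the arithmetic for an unsigned field.
 */
static inline u64 extract_unsigned_field(u64 reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;	/* 4-bit field at 'shift' */
}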
	/* Op1 = 0, CRn = 0, CRm = 4 */
/* in search_cmp_ftr_reg(): */
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
/*
 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
 * its sys_reg() encoding, without warning on a miss.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
/* in get_arm64_ftr_reg_nowarn(): */
		return ret->reg;
/*
 * get_arm64_ftr_reg - Looks up a feature register entry using
 * its sys_reg() encoding.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure but with a WARN_ON().
 */
/* in get_arm64_ftr_reg(): */
	/*
	 * Requesting a non-existent register search is an error. Warn
	 * and let the caller handle it.
	 */
/* in arm64_ftr_set_value(): */
	reg |= (ftr_val << ftrp->shift) & mask;
/* in arm64_ftr_safe_value(): */
	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
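
/*
 * Editor's sketch (not part of the kernel source): the safe-value policies
 * selected by ftrp->type above, written out explicitly. safe_of() is a
 * hypothetical helper; the real arm64_ftr_safe_value() also handles
 * FTR_HIGHER_OR_ZERO_SAFE, where a zero on either side wins.
 */
static inline s64 safe_of(int type, s64 safe_val, s64 new, s64 cur)
{
	switch (type) {
	case FTR_EXACT:		/* any mismatch falls back to a fixed value */
		return safe_val;
	case FTR_LOWER_SAFE:	/* the lower of the two values is safe */
		return new < cur ? new : cur;
	case FTR_HIGHER_SAFE:	/* the higher of the two values is safe */
		return new > cur ? new : cur;
	default:
		return safe_val;
	}
}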
/* in sort_ftr_regs(): */
		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
		/* ... */
		for (; ftr_bits->width != 0; ftr_bits++, j++) {
			unsigned int width = ftr_reg->ftr_bits[j].width;
			unsigned int shift = ftr_reg->ftr_bits[j].shift;

			WARN((shift + width) > 64,
				"%s has invalid feature at shift %d\n",
				ftr_reg->name, shift);
			/* ... */
			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
			WARN((shift + width) > prev_shift,
				"%s has feature overlap at shift %d\n",
				ftr_reg->name, shift);
		}
		/* ... */
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
/* in init_cpu_ftr_reg(): */
	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		/* ... */
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}
	/* ... */
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
/* in init_cpu_hwcaps_indirect_list_from_array(): */
	for (; caps->matches; caps++) {
		if (WARN(caps->capability >= ARM64_NCAPS,
			"Invalid capability %d\n", caps->capability))
			continue;
		if (WARN(cpu_hwcaps_ptrs[caps->capability],
			"Duplicate entry for capability %d\n",
			caps->capability))
			continue;
		cpu_hwcaps_ptrs[caps->capability] = caps;
	}
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* ... */
	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
		init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		/* ... */
	}

	/*
	 * Initialize the indirect array of CPU hwcaps capabilities pointers
	 * before we handle the boot CPU below.
	 */
	init_cpu_hwcaps_indirect_list();

	/*
	 * Detect and enable early CPU capabilities based on the boot CPU,
	 * after we have initialised the CPU feature infrastructure.
	 */
	setup_boot_cpu_capabilities();
}
/* in update_cpu_ftr_reg(): */
	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;

		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	/* ... */
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
		regp->name, boot, cpu, val);
	return 1;
}
/* in relax_cpu_ftr_reg(): */
	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
		if (ftrp->shift == field) {
			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
			break;
		}
	}

	/* Bogus field? */
	WARN_ON(!ftrp->width);
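
/*
 * Editor's note (not part of the kernel source): clearing a field from
 * strict_mask is what makes a mismatch in that field benign. The masked
 * comparison in check_update_ftr_reg() above stops seeing the field, so no
 * "SANITY CHECK" warning or taint is raised, while the sanitised system-wide
 * value is still updated via update_cpu_ftr_reg().
 */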
static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
				     struct cpuinfo_arm64 *boot)
{
	int taint = 0;
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/*
	 * If we don't have AArch32 at EL1, then relax the strictness of
	 * EL1-dependent register fields to avoid spurious sanity check fails.
	 */
	if (!id_aa64pfr0_32bit_el1(pfr0)) {
		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
	}

	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
				      info->reg_id_dfr1, boot->reg_id_dfr1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);
	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
				      info->reg_id_isar6, boot->reg_id_isar6);

	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
				      info->reg_id_pfr2, boot->reg_id_pfr2);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	return taint;
}
/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);

	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
				      info->reg_id_aa64isar2, boot->reg_id_aa64isar2);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);
		/* ... */
	}

	/* The 32-bit checks rely on a sanitised view of the AArch64 ID
	 * registers, so they are called last. */
	taint |= update_32bit_cpu_features(cpu, info, boot);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}
/* in read_sanitised_ftr_reg(): */
	return regp->sys_val;
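
/*
 * Editor's sketch (not part of the kernel source): typical use of
 * read_sanitised_ftr_reg() - read the system-wide safe value, then pick a
 * field out of it. The helper name is hypothetical; the extraction routine
 * is the real one from <asm/cpufeature.h>. An Atomics field value >= 2
 * indicates LSE atomics.
 */
static inline bool system_has_lse_atomics_sketch(void)
{
	u64 isar0 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);

	return cpuid_feature_extract_unsigned_field(isar0,
				ID_AA64ISAR0_ATOMICS_SHIFT) >= 2;
}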
/*
 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU.
 */
#include <linux/irqchip/arm-gic-v3.h>
/* in feature_matches(): */
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
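
/*
 * Editor's sketch (not part of the kernel source): what a feature-register
 * backed capability entry looks like when wired to has_cpuid_feature() and
 * feature_matches(). The field names follow struct arm64_cpu_capabilities;
 * the specific values are illustrative only.
 */
static const struct arm64_cpu_capabilities example_cap __maybe_unused = {
	.desc = "Example 4-bit ID register field capability",
	.type = ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches = has_cpuid_feature,
	.sys_reg = SYS_ID_AA64ISAR0_EL1,
	.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
	.sign = FTR_UNSIGNED,
	.min_field_value = 2,	/* Atomics >= 2 <=> LSE atomics present */
};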
/* in has_cpuid_feature(): the scope selects the sanitised or the raw register */
	if (scope == SCOPE_SYSTEM)
		val = read_sanitised_ftr_reg(entry->sys_reg);
	else
		val = __read_sysreg_by_encoding(entry->sys_reg);
/* in has_useable_gicv3_cpuif(): */
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);
/* in cpu_emulate_effective_ctr(): */
	/*
	 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
	 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
	 * to the CTR_EL0 on this CPU and emulate it with the real/safe
	 * value.
	 */
/* in has_useable_cnp(): */
	/*
	 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
	 * may share TLB entries with a CPU stuck in the crashed
	 * kernel.
	 */
/*
 * This check is triggered during the early boot before the cpufeature
 * is initialised. Checking the status on the local CPU allows the boot
 * CPU to detect the need for non-global mappings and thus avoid a
 * pagetable re-write after all the CPUs are booted. This check is
 * repeated against the sanitised system-wide
 * state once the SMP CPUs are up and thus make the switch to non-global
 * mappings if required.
 */
/* in unmap_kernel_at_el0(): */
	/* Defer to CPU feature registers */
	/* ... */

	/*
	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
	 * ThunderX leads to apparent I-cache corruption of kernel text, which
	 * ends as well as you might imagine. Don't even try.
	 */
	/* ... */
		__kpti_forced = -1;
	/* ... */
		__kpti_forced = -1;
/* in kpti_install_ng_mappings(): */
	int cpu = smp_processor_id();
	/* ... */

	/*
	 * We don't need to rewrite the page-tables if either we've done
	 * it already or we have KASLR enabled and therefore have not
	 * created any global mappings at all.
	 */
	/* ... */
	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
	/* ... */
	if (!cpu)
		arm64_use_ng_mappings = true;
/* in parse_kpti(): */
	__kpti_forced = enabled ? 1 : -1;
/* in has_hw_dbm(): */
	/*
	 * DBM is a non-conflicting feature. i.e, the kernel can safely
	 * run a mix of CPUs with and without the feature. So, we
	 * unconditionally enable the capability to allow any late CPU
	 * to use the feature. We only enable the control bits on the
	 * CPU, if it actually supports it.
	 *
	 * We have to make sure we print the "feature" detection message
	 * only when at least one CPU actually uses it. So check if this CPU
	 * uses it and print the message exactly once.
	 *
	 * This is safe as all CPUs (including secondary CPUs - due to the
	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
	 * go through the "matches" check exactly once. Also if a CPU
	 * matches the criteria, it is guaranteed that the CPU will turn
	 * the DBM on, as the capability is unconditionally enabled.
	 */
/*
 * The "amu_cpus" cpumask only signals that the CPU implementation for the
 * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
 * information regarding all the events that it supports. When a CPU bit is
 * set in the cpumask, the user of this feature can only rely on the presence
 * of the 4 fixed counters for that CPU. But this does not guarantee that the
 * counters are enabled or that access to them is allowed by code running at
 * higher exception levels (firmware).
 */
bool cpu_has_amu_feat(int cpu)
{
	return cpumask_test_cpu(cpu, &amu_cpus);
}
/* in cpu_amu_enable(): */
		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
			smp_processor_id());
/* in has_amu(): */
	/*
	 * The AMU extension is a non-conflicting feature: the kernel can
	 * safely run a mix of CPUs with and without support for the
	 * activity monitors extension. Therefore, unconditionally enable
	 * the capability to allow any late CPU to use the feature.
	 *
	 * With this feature unconditionally enabled, the cpu_enable
	 * function will be called for all CPUs that match the criteria,
	 * including secondary and hotplugged ones, marking this feature as
	 * present on that respective CPU. The enable function will also
	 * print a detection message.
	 */
/* in cpu_copy_el2regs(): */
	/*
	 * Before code patching, we only set tpidr_el1; all CPUs need to copy
	 * this value to tpidr_el2 before we patch the code. Once we've done
	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
	 * do anything here.
	 */
/* in has_address_auth_cpucap(): */
	/*
	 * The ptr-auth feature levels are not intercompatible with lower
	 * levels. Hence we must match ptr-auth feature level of the secondary
	 * CPUs with that of the boot CPU. The level of boot cpu is fetched
	 * from the sanitised register, which is guaranteed to match that of the
	 * boot CPU as a mismatched secondary CPU is parked before it gets
	 * a chance to update the state.
	 */
	boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
					       entry->field_pos, entry->sign);
	if (scope & SCOPE_BOOT_CPU)
		return boot_val >= entry->min_field_value;
	/* ... */
	sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
					      entry->field_pos, entry->sign);
	/* ... */
/* in bti_enable(): */
	/*
	 * Use of X16/X17 for tail-calls and trampolines that jump to
	 * function entry points using BR is a requirement for
	 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
	 */
/* in cpu_enable_mte(): */
	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
		mte_clear_page_tags(lm_alias(empty_zero_page));
/* Internal helper functions to match cpu capability type */
/* in cpucap_late_cpu_optional(): */
	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
/* in cpucap_late_cpu_permitted(): */
	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
/* in cpucap_panic_on_conflict(): */
	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
/* in arm64_features[]: */
	{
		.desc = "GIC system register CPU interface",
		/* ... */
	},
	/* ... */
	{
		.desc = "32-bit EL0 Support",
		/* ... */
	},
	{
		.desc = "32-bit EL1 Support",
		/* ... */
	},
	{
		/*
		 * The ID feature fields below are used to indicate that
		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
		 * more details.
		 */
		/* ... */
	},
	{
		/*
		 * Since this capability is enabled unconditionally, we don't
		 * want the detection message to be shown until at least one
		 * CPU is detected to actually use the feature.
		 */
		/* ... */
	},
	{
		.desc = "Stage-2 Force Write-Back",
		/* ... */
	},
	{
		.desc = "ARMv8.4 Translation Table Level",
		/* ... */
	},
/* in compat_has_neon(): */
	/*
	 * The check below is future-proof: it makes sure the field value is
	 * non-zero rather than matching one specific value.
	 */
/* in cap_set_elf_hwcap(): */
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		cpu_set_feature(cap->hwcap);
		break;
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
	/* ... */
	}

/* in cpus_have_elf_hwcap(): */
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = cpu_have_feature(cap->hwcap);
		break;
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
	/* ... */
	}
/* in setup_elf_hwcaps(): */
	/* We support emulation of accesses to CPU ID feature registers */
	/* ... */
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
			cap_set_elf_hwcap(hwcaps);
/* in update_cpu_capabilities(): */
		if (!caps || !(caps->type & scope_mask) ||
		    cpus_have_cap(caps->capability) ||
		    !caps->matches(caps, cpucap_default_scope(caps)))
			continue;

		if (caps->desc)
			pr_info("detected: %s\n", caps->desc);
		cpus_set_cap(caps->capability);

		if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
			set_bit(caps->capability, boot_capabilities);
/*
 * Enable all the available capabilities on this CPU. The capabilities
 * with BOOT_CPU scope are handled separately and hence skipped here.
 */
/* in cpu_enable_non_boot_scope_capabilities(): */
		if (!(cap->type & non_boot_scope))
			continue;

		if (cap->cpu_enable)
			cap->cpu_enable(cap);
/* in enable_cpu_capabilities(): */
		if (!caps || !(caps->type & scope_mask))
			continue;
		num = caps->capability;
		if (!cpus_have_cap(num))
			continue;
		/* ... */
		if (boot_scope && caps->cpu_enable)
			/*
			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
			 * before any secondary CPU boots. Thus, each secondary
			 * CPU enables the capability as appropriate via
			 * check_local_cpu_capabilities(). The only exception is
			 * the boot CPU, for which the capability must be
			 * enabled here.
			 */
			caps->cpu_enable(caps);
	/* ... */

	/*
	 * For all non-boot scope capabilities, use stop_machine()
	 * as it schedules the work allowing us to modify PSTATE.
	 */
/*
 * Run through the list of capabilities to check for conflicts.
 * If the system has already detected a capability, take necessary
 * action on this CPU.
 */
/* in verify_local_cpu_caps(): */
		if (!caps || !(caps->type & scope_mask))
			continue;

		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
		system_has_cap = cpus_have_cap(caps->capability);

		if (system_has_cap) {
			/*
			 * Check if the new CPU misses an advertised feature,
			 * which is not safe as the system relies on it.
			 */
			/* ... */
			/*
			 * We have to issue cpu_enable() irrespective of
			 * whether the CPU has it or not, as it is enabled
			 * system wide. It is up to the callback to take
			 * appropriate action on this CPU.
			 */
			if (caps->cpu_enable)
				caps->cpu_enable(caps);
		} else {
			/*
			 * Check if the CPU has this capability if it isn't
			 * safe to have when the system doesn't.
			 */
			/* ... */
		}
	/* ... */
		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
			smp_processor_id(), caps->capability,
			caps->desc, system_has_cap, cpu_has_cap);
/*
 * Check for CPU features that are used in early boot
 * based on the boot CPU value.
 */
/* in verify_local_elf_hwcaps(): */
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
				smp_processor_id(), caps->desc);
			cpu_die_early();
		}
/* in verify_sve_features(): */
		pr_crit("CPU%d: SVE: vector length support mismatch\n",
			smp_processor_id());
/* in verify_hyp_capabilities(): */
		pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
	/* ... */
		pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
/*
 * Run through the enabled system capabilities and enable() them on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
/* in check_local_cpu_capabilities(): */
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata work arounds and local features.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
/* in this_cpu_has_cap(): */
		return cap->matches(cap, SCOPE_LOCAL_CPU);
/*
 * This helper function is used in a narrow window when,
 * - The system wide safe registers are set with all the SMP CPUs and,
 * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
 * In all other cases cpus_have_{const_}cap() should be used.
 */
/* in __system_matches_cap(): */
		return cap->matches(cap, SCOPE_SYSTEM);
/* in setup_system_capabilities(): */
	/*
	 * We have finalised the system-wide safe feature
	 * registers; now finalise the capabilities based on them.
	 */
/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM(ARM DDI 0487A.f) for more details.
 */
/* in is_emulated(): */
	return (sys_reg_Op0(id) == 0x3 && sys_reg_CRn(id) == 0x0 &&
		sys_reg_Op1(id) == 0x0 &&
		(sys_reg_CRm(id) == 0 ||
		((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
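
/*
 * Editor's note (not part of the kernel source): in concrete terms, an ID
 * register such as ID_AA64PFR0_EL1 encodes as Op0 = 3, Op1 = 0, CRn = 0,
 * CRm = 4, Op2 = 0, so it falls inside the CRm = [4..7] window above and
 * its EL0 MRS accesses are trapped and emulated with the sanitised value.
 */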
/* in emulate_id_reg(): */
		return -EINVAL;

/* in emulate_sys_reg(): */
		return -EINVAL;